ext | sha | content
---|---|---|
py | 1a4bc40d9008c8d8d58eccb8229c442e440947b6 | from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.core.mail import send_mail
from django.conf import settings
from .authentication import WebpageTokenAuth
from .models import User, Comment, Edit
from .serializers import CommentSerializer, EditSerializer
@api_view(['GET'])
def form_validator(request):
"""
API call to validate the sign up form data on the client.
This validates that:
#. The chosen **Username** is not already taken.
#. The chosen **Email** is not already taken.
"""
Username = request.GET.get('Username', None)
Email = request.GET.get('Email', None)
usernameExists = User.objects.filter(Username=Username).exists()
emailExists = User.objects.filter(Email=Email).exists()
return Response({"UsernameExists": usernameExists, "EmailExists": emailExists}, status=status.HTTP_200_OK)
@api_view(['GET'])
def user_comments(request):
"""
Endpoint to get all the comments made by a user.
This expects a ``UserID`` to be provided as a query parameter.
"""
try:
comments = Comment.objects.filter(UserID=request.query_params.get('UserID'))
return Response(CommentSerializer(comments, many=True).data, status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated])
def comment_submit(request):
"""
Endpoint to submit a new user comment given:
- ``CommentText``: The text the user wrote.
- ``AHJPK``: The AHJ primary key of the AHJPage they commented on.
- ``ReplyingTo``: The UserID of the user who wrote the comment this comment is replying to, if any.
"""
comment_text = request.data.get('CommentText', None)
if comment_text is None:
return Response('Missing comment text', status=status.HTTP_400_BAD_REQUEST)
AHJPK = request.data.get('AHJPK', None)
ReplyingTo = request.data.get('ReplyingTo', None)
comment = Comment.objects.create(UserID=User.objects.get(Email=request.user),
AHJPK=AHJPK,
CommentText=comment_text, ReplyingTo=ReplyingTo)
# send the serialized comment back to the front-end
return Response(CommentSerializer(comment).data, status=status.HTTP_200_OK)
@api_view(['GET'])
def user_edits(request):
"""
Endpoint returning all edits made by a user.
This expects a ``UserID`` to be provided as a query parameter.
"""
try:
edits = Edit.objects.filter(ChangedBy=request.query_params.get('UserID'))
return Response(EditSerializer(edits, many=True).data, status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def send_support_email(request):
"""
Endpoint to send mail to SunSpec's support email address.
This expects as POST data:
- ``Email``: The email of the user writing to SunSpec support.
- ``Subject``: The subject of the email.
- ``Message``: The body of the email.
"""
try:
email = request.data.get('Email')
subject = request.data.get('Subject')
message = request.data.get('Message')
full_message = f'Sender: {email}\nMessage: {message}'
send_mail(subject, full_message, settings.EMAIL_HOST_USER, [settings.SUNSPEC_SUPPORT_EMAIL], fail_silently=False)
return Response(status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
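# Illustrative request/response shapes for the endpoints above (the URL paths are
# hypothetical and depend on the project's URL configuration):
#   GET  /form-validator/?Username=jane&Email=jane@example.com
#        -> {"UsernameExists": false, "EmailExists": false}
#   POST /comment-submit/  {"CommentText": "Thanks!", "AHJPK": 123, "ReplyingTo": null}
#        -> the serialized Comment (requires a WebpageTokenAuth token)
#   POST /send-support-email/  {"Email": "...", "Subject": "...", "Message": "..."}
#        -> 200 on success, 400 with the error text otherwise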
|
py | 1a4bc4e543c68c8d5c8553b92470a56a0909d14c | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.29.7'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires all resources to be released with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add await exchange.close() at a point in your code where you are done with the exchange and no longer need the instance (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle(self.rateLimit)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
http_status_code = response.status
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
raise ExchangeNotAvailable(method + ' ' + url)
except concurrent.futures._base.TimeoutError as e:
raise RequestTimeout(method + ' ' + url)
except aiohttp.client_exceptions.ClientConnectionError as e:
raise ExchangeNotAvailable(method + ' ' + url)
except aiohttp.client_exceptions.ClientError as e: # base exception class
raise ExchangeError(method + ' ' + url)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
# coroutines can only be awaited once so we wrap it in a task
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
async def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
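# A minimal usage sketch (not part of the original module): it assumes a concrete
# exchange subclass from ccxt.async_support (binance here) and relies on the
# __aenter__/__aexit__ support defined above to open and close the aiohttp session.
if __name__ == '__main__':
    import ccxt.async_support as ccxt_async  # assumed to be importable alongside this module
    async def _demo():
        async with ccxt_async.binance({'enableRateLimit': True}) as exchange:
            await exchange.load_markets()
            ticker = await exchange.fetch_ticker('BTC/USDT')
            print(ticker['symbol'], ticker['last'])
    asyncio.get_event_loop().run_until_complete(_demo())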
|
py | 1a4bc4fa9ea7be16c03c8a1984b4d9f352607073 | import scipy.stats as st
import math
import torch
import numpy as np
import torch.nn as nn
from functools import partial
# Target function definition
def f(input_):
r"""
Bimodal function
:param x:
:return:
"""
x = input_ + 0.5
y_left = st.skewnorm(a=4, loc=.3, scale=.7).pdf(3 * x) / 1.6
y_right = st.skewnorm(a=4, loc=.3, scale=.6).pdf(3 * (1 - x)) / 1.4
return 2 * (y_left + y_right) - 1
# REPULSIVE FUNCTION
def pairwise_rbf(y_ent_pts_new, y_ent_pts_old, std_pts):
# computation of the weights
return torch.mean(torch.exp(-(1 / (2 * std_pts**2)) * torch.norm(y_ent_pts_new - y_ent_pts_old, dim=1, keepdim=True)**2))
def optimize(net, optimizer, batch, add_repulsive_constraint=False, **kwargs):
criterion = nn.MSELoss()
if add_repulsive_constraint:
criterion_repulsive = partial(pairwise_rbf, std_pts=kwargs['bandwidth_repulsive'])
info = {}
x, y = batch  # x holds the network inputs and y the regression targets
output = net(x)
if not add_repulsive_constraint:
loss = criterion(output, y)
info['data_loss'] = loss.item()
else:
data_loss = criterion(output, y)
info['data_loss'] = data_loss.item()
# entropy loss
net.eval()
y_rep = net(kwargs['batch_repulsive'])
net.train()
y_rep_ref = kwargs['reference_net'](kwargs['batch_repulsive']).detach()
entropy_loss = criterion_repulsive(y_rep, y_rep_ref) # close to 1 if the probs are the same, else close to 0
info['repulsive_loss'] = entropy_loss.item()
# total loss
loss = data_loss + kwargs['lambda_repulsive'] * entropy_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
# logging
info['total_loss'] = loss.item()
return info
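# A minimal usage sketch (not part of the original script): it shows which keyword
# arguments the repulsive branch of optimize() expects. The network sizes, bandwidth
# and lambda values below are illustrative assumptions.
if __name__ == '__main__':
    torch.manual_seed(0)
    net = nn.Sequential(nn.Linear(1, 32), nn.ReLU(), nn.Linear(32, 1))
    reference_net = nn.Sequential(nn.Linear(1, 32), nn.ReLU(), nn.Linear(32, 1))
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    x = torch.linspace(-1, 1, 64).unsqueeze(1)           # 64 scalar inputs
    y = torch.tensor(f(x.numpy()), dtype=torch.float32)  # bimodal targets from f()
    batch_repulsive = torch.rand(32, 1) * 2 - 1          # points where the two nets are pushed apart
    info = optimize(net, optimizer, (x, y),
                    add_repulsive_constraint=True,
                    bandwidth_repulsive=0.1,
                    batch_repulsive=batch_repulsive,
                    reference_net=reference_net,
                    lambda_repulsive=0.5)
    print(info)  # reports data_loss, repulsive_loss and total_loss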
|
py | 1a4bc525a6b01732be35fd4084f6366fb689fd4b | DATASTORE_COLUMN_PATIENT_ID = 'PatientId'
DATASTORE_COLUMN_PATIENT_NAME = 'PatientName'
DATASTORE_COLUMN_HEALTH_CARE_PROFESSSIONAL_ID = 'HealthCareProfessionalId'
DATASTORE_COLUMN_HEALTH_CARE_PROFESSSIONAL_NAME = 'HealthCareProfessionalName'
DATASTORE_COLUMN_SESSION_ID = 'SessionId'
DATASTORE_COLUMN_SESSION_NAME = 'SessionName'
DATASTORE_COLUMN_COMPREHEND_S3_PATH = 'ComprehendS3Path'
DATASTORE_COLUMN_TRANSCRIBE_S3_PATH = 'TranscribeS3Path'
DATASTORE_COLUMN_AUDIO_S3_PATH = 'AudioS3Path'
DATASTORE_COLUMN_TIMESTAMP_START = 'TimeStampStart'
DATASTORE_COLUMN_TIMESTAMP_END = 'TimeStampEnd'
TRANSLATION_TARGET_LANGUAGE_CODE = 'TargetLanguageCode'
TRANSLATION_SOURCE_TEXT = 'TranslationSourceText'
TRANSLATION_TEXT_MAX_UTF8_BYTES_SIZE = 5000 |
py | 1a4bc55955d722e0861361db6a357b87c3ebe47a | from .pumping_amounts import pumping_amounts # NOQA
from .diaperchange_amounts import diaperchange_amounts # NOQA
from .diaperchange_lifetimes import diaperchange_lifetimes # NOQA
from .diaperchange_types import diaperchange_types # NOQA
from .feeding_amounts import feeding_amounts # NOQA
from .feeding_duration import feeding_duration # NOQA
from .sleep_pattern import sleep_pattern # NOQA
from .sleep_totals import sleep_totals # NOQA
from .tummytime_duration import tummytime_duration # NOQA
from .weight_weight import weight_weight # NOQA
from .height_height import height_height # NOQA
from .head_circumference_head_circumference import (
head_circumference_head_circumference,
) # NOQA
from .bmi_bmi import bmi_bmi # NOQA
|
py | 1a4bc5a93bedd0d0526856e38bc529e2716f5310 | # TODO: This code is comparing HyperparameterRanges_CS with HyperparameterRanges.
# If the latter code is removed, this test can go as well.
import numpy as np
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from numpy.testing import assert_allclose
from autogluon.core.searcher import \
HyperparameterRanges_CS
from autogluon.core.searcher import \
HyperparameterRanges_Impl, HyperparameterRangeCategorical, \
HyperparameterRangeContinuous, HyperparameterRangeInteger
from autogluon.core.searcher import LinearScaling, \
LogScaling
def test_to_ndarray():
np.random.seed(123456)
random_state = np.random.RandomState(123456)
prob_categ = 0.3
for iter in range(20):
# Create ConfigurationSpace
num_hps = np.random.randint(low=1, high=20)
if iter == 0:
_prob_categ = 0.
elif iter == 1:
_prob_categ = 1.
else:
_prob_categ = prob_categ
config_space = CS.ConfigurationSpace()
ndarray_size = 0
_hp_ranges = dict()
for hp_it in range(num_hps):
name = str(hp_it)
if np.random.random() < _prob_categ:
num_choices = np.random.randint(low=2, high=11)
choices = tuple([str(i) for i in range(num_choices)])
hp = CSH.CategoricalHyperparameter(name, choices=choices)
hp2 = HyperparameterRangeCategorical(name, choices)
ndarray_size += num_choices
else:
ndarray_size += 1
rand_coin = np.random.random()
if rand_coin < 0.5:
log_scaling = (rand_coin < 0.25)
hp = CSH.UniformFloatHyperparameter(
name=name, lower=0.5, upper=5., log=log_scaling)
hp2 = HyperparameterRangeContinuous(
name, lower_bound=0.5, upper_bound=5.,
scaling=LogScaling() if log_scaling else LinearScaling())
else:
log_scaling = (rand_coin < 0.75)
hp = CSH.UniformIntegerHyperparameter(
name=name, lower=2, upper=10, log=log_scaling)
hp2 = HyperparameterRangeInteger(
name=name, lower_bound=2, upper_bound=10,
scaling=LogScaling() if log_scaling else LinearScaling())
config_space.add_hyperparameter(hp)
_hp_ranges[name] = hp2
hp_ranges_cs = HyperparameterRanges_CS(config_space)
hp_ranges = HyperparameterRanges_Impl(
*[_hp_ranges[x] for x in config_space.get_hyperparameter_names()])
# Compare ndarrays created by both codes
for cmp_it in range(5):
config_cs = hp_ranges_cs.random_candidate(random_state)
_config = config_cs.get_dictionary()
config = (_config[name]
for name in config_space.get_hyperparameter_names())
ndarr_cs = hp_ranges_cs.to_ndarray(config_cs)
ndarr = hp_ranges.to_ndarray(config)
assert_allclose(ndarr_cs, ndarr, rtol=1e-4)
def test_to_ndarray_name_last_pos():
np.random.seed(123456)
random_state = np.random.RandomState(123456)
config_space = CS.ConfigurationSpace()
config_space.add_hyperparameters([
CSH.UniformFloatHyperparameter('a', lower=0., upper=1.),
CSH.UniformIntegerHyperparameter('b', lower=2, upper=3),
CSH.CategoricalHyperparameter('c', choices=('1', '2', '3')),
CSH.UniformIntegerHyperparameter('d', lower=2, upper=3),
CSH.CategoricalHyperparameter('e', choices=('1', '2'))])
hp_a = HyperparameterRangeContinuous(
'a', lower_bound=0., upper_bound=1., scaling=LinearScaling())
hp_b = HyperparameterRangeInteger(
'b', lower_bound=2, upper_bound=3, scaling=LinearScaling())
hp_c = HyperparameterRangeCategorical('c', choices=('1', '2', '3'))
hp_d = HyperparameterRangeInteger(
'd', lower_bound=2, upper_bound=3, scaling=LinearScaling())
hp_e = HyperparameterRangeCategorical('e', choices=('1', '2'))
for name_last_pos in ['a', 'c', 'd', 'e']:
hp_ranges_cs = HyperparameterRanges_CS(
config_space, name_last_pos=name_last_pos)
if name_last_pos == 'a':
lst = [hp_b, hp_c, hp_d, hp_e, hp_a]
elif name_last_pos == 'c':
lst = [hp_a, hp_b, hp_d, hp_e, hp_c]
elif name_last_pos == 'd':
lst = [hp_a, hp_b, hp_c, hp_e, hp_d]
else:
lst = [hp_a, hp_b, hp_c, hp_d, hp_e]
hp_ranges = HyperparameterRanges_Impl(*lst)
names = [hp.name for hp in hp_ranges.hp_ranges]
config_cs = hp_ranges_cs.random_candidate(random_state)
_config = config_cs.get_dictionary()
config = (_config[name] for name in names)
ndarr_cs = hp_ranges_cs.to_ndarray(config_cs)
ndarr = hp_ranges.to_ndarray(config)
assert_allclose(ndarr_cs, ndarr, rtol=1e-4)
|
py | 1a4bc5e9e94b20a146d109d205a2f34696227fa4 | #!/usr/bin/env python
#
# toolbar.py - FSLeyes toolbars
#
# Author: Paul McCarthy <[email protected]>
#
"""This module provides the :class:`FSLeyesToolBar` class, the base class
for all toolbars in *FSLeyes*.
"""
import logging
import wx
import wx.lib.newevent as wxevent
import numpy as np
import fsleyes.panel as fslpanel
import fsleyes.icons as icons
log = logging.getLogger(__name__)
class FSLeyesToolBar(fslpanel.FSLeyesPanel):
"""Base class for all *FSLeyes* toolbars.
The ``FSLeyesToolBar`` is a regular :class:`wx.PyPanel` to which a
group of *tools* can be added, where a tool may be any ``wx`` control.
See also the :class:`.ControlToolBar`, which is the true base-class for
all toolbars that are added to FSLeyes view panels.
Tools can be added to a ``FSLeyesToolBar`` with the following methods:
.. autosummary::
:nosignatures:
AddTool
InsertTool
InsertTools
SetTools
MakeLabelledTool
When the horizontal size of a ``FSLeyesToolBar`` becomes too small to
display all of its tools, the toolbar is compressed: some tools are
hidden, and buttons are displayed on each end of the toolbar, allowing the
user to scroll through the toolbar, to access the hidden tools. The user
may also use the mouse wheel to scroll through the toolbar.
A collapsed ``FSLeyesToolBar`` looks something like this:
.. image:: images/fsleyestoolbar.png
:scale: 50%
:align: center
"""
def __init__(self,
parent,
overlayList,
displayCtx,
viewPanel,
height=32,
orient=wx.HORIZONTAL,
*args,
**kwargs):
"""Create a ``FSLeyesToolBar``.
:arg parent: The :mod:`wx` parent object.
:arg overlayList: The :class:`.OverlayList`, containing all overlays
being displayed.
:arg displayCtx: A :class:`.DisplayContext`, which defines how the
overlays are to be displayed.
:arg viewPanel: The :class:`.ViewPanel` that owns this toolbar.
:arg height: Desired toolbar height in pixels. This value is used
to look up appropriately sized left/right arrow
icons.
:arg actionz: A dictionary of actions passed through to the
:meth:`.ActionProvider.__init__`.
All other arguments are passed through to
:meth:`.FSLeyesPanel.__init__`.
"""
if orient not in (wx.HORIZONTAL, wx.VERTICAL):
raise ValueError('Invalid orientation: {}'.format(orient))
fslpanel.FSLeyesPanel.__init__(self,
parent,
overlayList,
displayCtx,
viewPanel.frame,
*args,
**kwargs)
self.__tools = []
self.__visibleOffset = 0
self.__numVisible = 0
self.__height = height
self.__orient = orient
font = self.GetFont()
self.SetFont(font.Smaller())
style = wx.BU_EXACTFIT | wx.BU_NOTEXT
if orient == wx.HORIZONTAL:
lBmp = icons.loadBitmap('thinLeftArrow{}' .format(height))
rBmp = icons.loadBitmap('thinRightArrow{}'.format(height))
else:
lBmp = icons.loadBitmap('thinUpArrow{}' .format(height))
rBmp = icons.loadBitmap('thinDownArrow{}'.format(height))
self.__leftButton = wx.Button(self, style=style)
self.__rightButton = wx.Button(self, style=style)
self.__leftButton .SetBitmap(lBmp)
self.__rightButton.SetBitmap(rBmp)
for btn in [self.__leftButton, self.__rightButton]:
size = btn.GetBestSize()
btn.SetMinSize(size)
self.__sizer = wx.BoxSizer(orient)
self.SetSizer(self.__sizer)
self.__sizer.Add(self.__leftButton, flag=wx.EXPAND)
self.__sizer.Add((0, 0), flag=wx.EXPAND, proportion=1)
self.__sizer.Add(self.__rightButton, flag=wx.EXPAND)
self.__leftButton .Bind(wx.EVT_BUTTON, self.__onLeftButton)
self.__rightButton.Bind(wx.EVT_BUTTON, self.__onRightButton)
self .Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
self .Bind(wx.EVT_SIZE, self.__drawToolBar)
def GetOrient(self):
"""Returns the orientation of this ``FSLeyesToolBar``, either
``wx.HORIZONTAL`` or ``wx.VERTICAL``.
"""
return self.__orient
def MakeLabelledTool(self,
tool,
labelText,
labelSide=wx.TOP,
expand=False):
"""Creates a panel containing the given tool, and a label for the
tool. The panel is returned, but is not added to this
``FSLeyesToolBar`` - you will have to do that yourself, e.g.::
labelledTool = toolbar.MakeLabelledTool(tool, 'Label', wx.BOTTOM)
toolbar.AddTool(labelledTool)
:arg tool: A :mod:`wx` control.
:arg labelText: A label for the tool.
:arg labelSide: Which side of the tool to put the label - ``wx.TOP``,
``wx.BOTTOM``, ``wx.LEFT``, or ``wx.RIGHT``.
:arg expand: Defaults to ``False``. If ``True``, the widget and
label will be set up so they expand to fit all
available space
"""
if labelSide in (wx.TOP, wx.BOTTOM): orient = wx.VERTICAL
elif labelSide in (wx.LEFT, wx.RIGHT): orient = wx.HORIZONTAL
oldParent = tool.GetParent()
panel = wx.Panel(oldParent)
sizer = wx.BoxSizer(orient)
panel.SetSizer(sizer)
tool.Reparent(panel)
label = wx.StaticText(panel, style=wx.ALIGN_CENTRE_HORIZONTAL)
label.SetLabel(labelText)
if expand:
sizerArgs = {
'flag' : wx.EXPAND,
'proportion' : 1
}
else:
sizerArgs = {
'flag' : wx.ALIGN_CENTRE,
}
if labelSide in (wx.TOP, wx.LEFT):
sizer.Add(label, **sizerArgs)
sizer.Add(tool, **sizerArgs)
else:
sizer.Add(tool, **sizerArgs)
sizer.Add(label, **sizerArgs)
return panel
def Enable(self, *args, **kwargs):
"""Enables/disables all tools in this ``FSLeyesToolBar``.
:arg args: Passed to the ``Enable`` method of each tool.
:arg kwargs: Passed to the ``Enable`` method of each tool.
"""
super(FSLeyesToolBar, self).Enable(*args, **kwargs)
for t in self.__tools:
t.Enable(*args, **kwargs)
def GetTools(self):
"""Returns a list containing all tools in this ``FSLeyesToolBar``. """
return self.__tools[:]
def GetToolCount(self):
"""Returns the number of tools in this ``FSLeyesToolBar``. """
return len(self.__tools)
def AddDivider(self):
"""Adds a :class:`.ToolBarDivider` to the end of the toolbar. """
self.InsertDivider()
def InsertDivider(self, index=None):
"""Inserts a :class:`.ToolBarDivider` into the toolbar at the
specified ``index``.
"""
if self.__orient == wx.VERTICAL: orient = wx.HORIZONTAL
elif self.__orient == wx.HORIZONTAL: orient = wx.VERTICAL
self.InsertTool(ToolBarDivider(self, self.__height, orient), index)
def AddTool(self, tool):
"""Adds the given tool to this ``FSLeyesToolBar``. """
self.InsertTool(tool)
def InsertTools(self, tools, index=None):
"""Inserts the given sequence of tools into this ``FSLeyesToolBar``,
at the specified index.
:arg tools: A sequence of tools to add.
:arg index: Insert the tools before this index (default: end).
"""
if index is None:
index = self.GetToolCount()
for i, tool in enumerate(tools, index):
self.InsertTool(tool, i, postevent=False)
wx.PostEvent(self, ToolBarEvent())
def SetTools(self, tools, destroy=False):
"""Replaces all of the existing tools in this ``FSLeyesToolBar``
with the given sequence of tools.
:arg tools: Sequence of new tools to add.
:arg destroy: If ``True`` all of the old tools are destroyed.
"""
self.ClearTools(destroy, postevent=False)
for tool in tools:
self.InsertTool(tool, postevent=False, redraw=False)
self.__drawToolBar()
wx.PostEvent(self, ToolBarEvent())
def InsertTool(self, tool, index=None, postevent=True, redraw=True):
"""Inserts the given tool into this ``FSLeyesToolBar``, at the
specified index.
:arg tool: The tool to insert.
:arg index: Index to insert the tool.
:arg postevent: If ``True``, a :data:`ToolBarEvent` will be generated.
Pass ``False`` to suppress this event.
:arg redraw: If ``True``, the toolbar is redrawn. Pass ``False``
to suppress this behaviour.
"""
if index is None:
index = len(self.__tools)
log.debug('{}: adding tool at index {}: {}'.format(
type(self).__name__, index, type(tool).__name__))
tool.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
# gtk3: something somewhere sometimes
# clobbers the best size, so widgets
# don't get shown. Only observed with
# BitmapToggleButtons.
size = tool.GetBestSize()
tool.SetMinSize(size)
tool.SetMaxSize(size)
self.__tools.insert(index, tool)
self.__sizer.Insert(index + 1, tool, flag=wx.ALIGN_CENTRE)
self.InvalidateBestSize()
if redraw:
self.__drawToolBar()
if postevent:
wx.PostEvent(self, ToolBarEvent())
def DoGetBestSize(self):
"""Calculates and returns the best size for this toolbar, simply the
minimum size that will fit all tools.
This method is called by :mod:`wx` when this toolbar is laid out.
"""
# Calculate the minimum/maximum size
# for this toolbar, given the addition
# of the new tool. If the orientation
# of this toolbar (set in __init__) is
# HORIZONTAL, the ttlSpace is used to
# store total width, otherwise it is
# used to store total height.
ttlSpace = 0
minWidth = 0
minHeight = 0
for tool in self.__tools:
tw, th = tool.GetBestSize().Get()
if tw > minWidth: minWidth = tw
if th > minHeight: minHeight = th
if self.__orient == wx.HORIZONTAL: ttlSpace += tw
else: ttlSpace += th
if self.__orient == wx.HORIZONTAL:
leftWidth = self.__leftButton .GetBestSize().GetWidth()
rightWidth = self.__rightButton.GetBestSize().GetWidth()
minWidth = minWidth + leftWidth + rightWidth
else:
topHeight = self.__leftButton .GetBestSize().GetHeight()
bottomHeight = self.__rightButton.GetBestSize().GetHeight()
minHeight = minHeight + topHeight + bottomHeight
if self.__orient == wx.HORIZONTAL: size = (ttlSpace, minHeight)
else: size = (minWidth, ttlSpace)
# The agw.AuiManager does not honour the best size when
# toolbars are floated, but it does honour the minimum
# size. So I'm just setting the minimum size to the best
# size.
log.debug('Setting toolbar size: {}'.format(size))
self.SetMinSize( size)
self.SetMaxSize( size)
self.CacheBestSize(size)
return size
def ClearTools(
self,
destroy=False,
startIdx=None,
endIdx=None,
postevent=True):
"""Removes all tools, or a range of tools, from this
``FSLeyesToolBar``.
:arg destroy: If ``True``, the removed tools are destroyed.
:arg startIdx: Start index of tools to remove. If not provided,
defaults to 0.
:arg endIdx: End index of tools to remove (exclusive). If not
provided, defaults to :meth:`GetToolCount()`.
:arg postevent: If ``True``, a :data:`ToolBarEvent` will be
generated. Set to ``False`` to suppress the event.
"""
if len(self.__tools) == 0:
return
if startIdx is None: startIdx = 0
if endIdx is None: endIdx = len(self.__tools)
for i in range(startIdx, endIdx):
tool = self.__tools[i]
self.__sizer.Detach(tool)
if destroy:
tool.Destroy()
self.__tools[startIdx:endIdx] = []
self.InvalidateBestSize()
self.Layout()
if postevent:
wx.PostEvent(self, ToolBarEvent())
def __onMouseWheel(self, ev):
"""Called when the mouse wheel is rotated on this ``FSLeyesToolBar``.
Calls :meth:`__onLeftButton` or :meth:`__onRightButton`, depending
on the rotation direction.
"""
wheelDir = ev.GetWheelRotation()
if wheelDir < 0: self.__onRightButton()
elif wheelDir > 0: self.__onLeftButton()
def __onLeftButton(self, ev=None):
"""Called when the left toolbar button is pressed.
If the toolbar is compressed, it is scrolled to the left.
"""
self.__visibleOffset -= 1
if self.__visibleOffset <= 0:
self.__visibleOffset = 0
log.debug('Left button pushed - setting start '
'tool index to {}'.format(self.__visibleOffset))
self.__drawToolBar()
def __onRightButton(self, ev=None):
"""Called when the right toolbar button is pressed.
If the toolbar is compressed, it is scrolled to the right.
"""
self.__visibleOffset += 1
if self.__visibleOffset + self.__numVisible >= len(self.__tools):
self.__visibleOffset = len(self.__tools) - self.__numVisible
log.debug('Right button pushed - setting start '
'tool index to {}'.format(self.__visibleOffset))
self.__drawToolBar()
def __drawToolBar(self, *a):
"""Draws this ``FSLeyesToolBar``.
If the toolbar is big enough, all tools are drawn. Otherwise, the
method figures out how many tools can be drawn, and which tools to
draw, given the current size.
"""
sizer = self.__sizer
tools = self.__tools
orient = self.__orient
lbtn = self.__leftButton
rbtn = self.__rightButton
if orient == wx.HORIZONTAL:
availSpace = self.GetSize().GetWidth()
reqdSpace = [tool.GetBestSize().GetWidth() for tool in tools]
leftSpace = lbtn .GetBestSize().GetWidth()
rightSpace = rbtn .GetBestSize().GetWidth()
else:
availSpace = self.GetSize().GetHeight()
reqdSpace = [tool.GetBestSize().GetHeight() for tool in tools]
leftSpace = lbtn .GetBestSize().GetHeight()
rightSpace = rbtn .GetBestSize().GetHeight()
enoughSpace = availSpace >= sum(reqdSpace)
sizer.Show(lbtn, not enoughSpace)
sizer.Show(rbtn, not enoughSpace)
# show all tools
if enoughSpace:
log.debug('{}: All tools fit ({} >= {})'.format(
type(self).__name__, availSpace, sum(reqdSpace)))
self.__visibleOffset = 0
self.__numVisible = len(tools)
for tool in tools:
sizer.Show(tool)
# show <numVisible> tools, starting from <visibleOffset>
# (see __onMouseWheel/__onLeftButton/__onRightButton)
else:
reqdSpace = reqdSpace[self.__visibleOffset:]
cumSpace = np.cumsum(reqdSpace) + leftSpace + rightSpace
biggerIdxs = [int(i) for i in np.where(cumSpace > availSpace)[0]]
if len(biggerIdxs) == 0:
lastIdx = len(tools)
else:
lastIdx = biggerIdxs[0] + self.__visibleOffset
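# For example, with reqdSpace == [30, 30, 30, 30], leftSpace == rightSpace == 20
# and availSpace == 100, cumSpace == [70, 100, 130, 160], so biggerIdxs == [2, 3]
# and only the first two tools after __visibleOffset remain visible.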
self.__numVisible = lastIdx - self.__visibleOffset
log.debug('{}: {} tools fit ({} - {})'.format(
type(self).__name__, self.__numVisible, self.__visibleOffset, lastIdx))
lbtn.Enable(self.__visibleOffset > 0)
rbtn.Enable(lastIdx < len(tools))
for i in range(len(tools)):
sizer.Show(tools[i], self.__visibleOffset <= i < lastIdx)
self.Layout()
_ToolBarEvent, _EVT_TOOLBAR_EVENT = wxevent.NewEvent()
EVT_TOOLBAR_EVENT = _EVT_TOOLBAR_EVENT
"""Identifier for the :data:`ToolBarEvent` event. """
ToolBarEvent = _ToolBarEvent
"""Event emitted when one or more tools is/are added/removed to/from a
:class:`FSLeyesToolBar`.
"""
class ToolBarDivider(wx.Panel):
"""An empty ``wx.Panel`` intended to be used for dividing space in a
:class:`FSLeyesToolBar`.
"""
def __init__(self,
parent,
width=10,
height=32,
orient=wx.VERTICAL):
wx.Panel.__init__(self, parent)
if orient == wx.VERTICAL: size = (width, height)
elif orient == wx.HORIZONTAL: size = (height, width)
self.SetMinSize(size)
self.SetMaxSize(size)
|
py | 1a4bc60f3c6994ad82a6ffd0e274fd5050bbb6b7 | #!/usr/bin/env python
"""Utility to generate a table of browser safe colors.
"""
__version__ = '$Id$'
__author__ = "Robin Friedrich"
__date__ = "Feb. 18, 1998"
# colorcube.py
# COPYRIGHT (C) 1998 ROBIN FRIEDRICH email:[email protected]
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
from HTMLgen import *
hexes = ('00','33','66','99','CC','FF')
def main(filename):
# Create a fresh document
doc = SimpleDocument(title='Web Safe Colors', bgcolor = '#FFFFFF')
# Create an invisible table for layout
bodytable = TableLite(cellpadding=5, border=0, html_escape="off")
# Set up a table for the 216 color cube
colortable = TableLite(cellpadding=0, border=0, cellspacing=1, html_escape="off")
colortable.append(Caption("The 216 Web Color Cube"))
for i in (0,1,2,3,4,5):
for j in (0,1,2,3,4,5):
tr = TR()
for k in (0,1,2,3,4,5):
tr.append(makecell("#%s%s%s" % (hexes[i], hexes[j], hexes[k])))
colortable.append(tr)
# append the table as a cell in the body table
bodyrow = TR(TD(colortable))
# Set up a table for the greyscale
greytable = TableLite(cellpadding=0, border=0, cellspacing=1, html_escape="off")
greytable.append(Caption("The 16 Grey Levels"))
for a in '0123456789ABCDEF':
greytable.append( TR( makecell("#%s%s%s%s%s%s" % ((a,)*6)) ) )
bodyrow.append(TD(greytable))
# Set up a table containing the pure colors
puretable = TableLite(cellpadding=0, border=0, cellspacing=1, html_escape="off")
puretable.append(Caption("The Pure Colors"))
for a in '123456789ABCDEF':
tr = TR()
tr.append(makecell("#%s%s0000" % (a,a) ) )
tr.append(makecell("#00%s%s00" % (a,a) ) )
tr.append(makecell("#0000%s%s" % (a,a) ) )
puretable.append(tr)
bodyrow.append(TD(puretable))
# Now attach the body row to the bodytable
bodytable.append(bodyrow)
# Attach the bodytable to the document and write it all out
doc.append(bodytable)
doc.write(filename)
def makecell(color):
"""Return a table cell object (TD) of the given color tag
"""
cell = TD(bgcolor=color, align='center', height=26, width=60)
cell.append(Font(color, color = contrast(color), size = -2))
return cell
def contrast(color):
"""Compute luminous efficiency of given color and return either
white or black based on which would contrast more.
"""
# I know this is not exact; just my guess
R = int(color[1:3], 16)
G = int(color[3:5], 16)
B = int(color[5:7], 16)
lumen = 0.6*R + G + 0.3*B
if lumen > 250:
return "#000000"
else:
return "#FFFFFF"
if __name__ == '__main__':
main('colorcube.html')
|
py | 1a4bc662cbc94ca24ba23f8d5b38515a5db2237c | _base_ = "finetune-eval-base.py"
# dataset settings
data_source_cfg = dict(
type="ImageListMultihead",
memcached=False,
mclient_path='/no/matter',
# this will be ignored if type != ImageListMultihead
)
data_train_list = "data/xview/meta/train-1000.txt"
data_train_root = 'data/xview'
data_val_list = "data/xview/meta/val.txt"
data_val_root = 'data/xview'
data_test_list = "data/xview/meta/test.txt"
data_test_root = 'data/xview'
dataset_type = "AUROCDataset"
img_norm_cfg = dict(mean=[0.368,0.381,0.3436], std=[0.2035,0.1854,0.1849])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
data = dict(
batch_size=64, # x4 from update_interval
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_source=dict(
list_file=data_val_list, root=data_val_root, **data_source_cfg),
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_source=dict(
list_file=data_test_list, root=data_test_root, **data_source_cfg),
pipeline=test_pipeline))
custom_hooks = [
dict(
name="val",
type='ValidateHook',
dataset=data['val'],
by_epoch=False,
initial=False,
interval=25,
imgs_per_gpu=32,
workers_per_gpu=5,
eval_param=dict()),
dict(
name="test",
type='ValidateHook',
dataset=data['test'],
by_epoch=False,
initial=False,
interval=25,
imgs_per_gpu=32,
workers_per_gpu=5,
eval_param=dict()),
]
by_iter =True
# learning policy
lr_config = dict(
by_epoch=False,
policy='step',
step=[833,1667],
gamma=0.1 # multiply LR by this number at each step
)
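# The two steps fall at roughly 1/3 and 2/3 of total_iters (2500), giving two tenfold LR decays.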
# momentum and weight decay from VTAB and IDRL
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.,
paramwise_options={r'\Ahead.': dict(lr_mult=100)})
# runtime settings
# total iters or total epochs
total_iters=2500
checkpoint_config = dict(interval=2500)
log_config = dict(
interval=1,
by_epoch=False,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook', by_epoch=False)
])
optimizer_config = dict(update_interval=4)
|
py | 1a4bc71fef908f0e7246446f044e50ae99d575e7 | '''
Created on 8 March 2011
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
epruyt <e.pruyt (at) tudelft (dot) nl>
'''
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from math import exp
from ema_workbench.em_framework import (RealParameter, CategoricalParameter,
Outcome, perform_experiments)
from ema_workbench.util import ema_logging
from ema_workbench.connectors.vensim import VensimModel
class ScarcityModel(VensimModel):
def returnsToScale(self, x, speed, scale):
return (x*1000, scale*1/(1+exp(-1 * speed * (x-50))))
def approxLearning(self, x, speed, scale, start):
x = x-start
loc = 1 - scale
a = (x*10000, scale*1/(1+exp(speed * x))+loc)
return a
def f(self, x, speed, loc):
return (x/10, loc*1/(1+exp(speed * x)))
def priceSubstite(self, x, speed, begin, end):
scale = 2 * end
start = begin - scale/2
return (x+2000, scale*1/(1+exp(-1 * speed * x)) + start)
def run_model(self, scenario, policy):
"""Method for running an instantiated model structure """
kwargs = scenario
loc = kwargs.pop("lookup shortage loc")
speed = kwargs.pop("lookup shortage speed")
lookup = [self.f(x/10, speed, loc) for x in range(0, 100)]
kwargs['shortage price effect lookup'] = lookup
speed = kwargs.pop("lookup price substitute speed")
begin = kwargs.pop("lookup price substitute begin")
end = kwargs.pop("lookup price substitute end")
lookup = [self.priceSubstite(x, speed, begin, end)
for x in range(0, 100, 10)]
kwargs['relative price substitute lookup'] = lookup
scale = kwargs.pop("lookup returns to scale speed")
speed = kwargs.pop("lookup returns to scale scale")
lookup = [self.returnsToScale(x, speed, scale)
for x in range(0, 101, 10)]
kwargs['returns to scale lookup'] = lookup
scale = kwargs.pop("lookup approximated learning speed")
speed = kwargs.pop("lookup approximated learning scale")
start = kwargs.pop("lookup approximated learning start")
lookup = [self.approxLearning(x, speed, scale, start)
for x in range(0, 101, 10)]
kwargs['approximated learning effect lookup'] = lookup
super(ScarcityModel, self).run_model(kwargs, policy)
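# Each Vensim lookup assembled above is a list of (x, y) tuples produced by the helper
# methods defined at the top of the class (f, priceSubstite, returnsToScale, approxLearning).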
if __name__ == "__main__":
ema_logging.log_to_stderr(ema_logging.DEBUG)
model = ScarcityModel("scarcity", wd=r'./models/scarcity',
model_file=r'\MetalsEMA.vpm')
model.outcomes = [Outcome('relative market price', time=True),
Outcome('supply demand ratio', time=True),
Outcome('real annual demand', time=True),
Outcome('produced of intrinsically demanded', time=True),
Outcome('supply', time=True),
Outcome('Installed Recycling Capacity', time=True),
Outcome('Installed Extraction Capacity', time=True)]
model.uncertainties = [
RealParameter("price elasticity of demand", 0, 0.5),
RealParameter("fraction of maximum extraction capacity used",
0.6, 1.2),
RealParameter("initial average recycling cost", 1, 4),
RealParameter("exogenously planned extraction capacity",
0, 15000),
RealParameter("absolute recycling loss fraction", 0.1, 0.5),
RealParameter("normal profit margin", 0, 0.4),
RealParameter("initial annual supply", 100000, 120000),
RealParameter("initial in goods", 1500000, 2500000),
RealParameter("average construction time extraction capacity",
1, 10),
RealParameter("average lifetime extraction capacity", 20, 40),
RealParameter("average lifetime recycling capacity", 20, 40),
RealParameter("initial extraction capacity under construction",
5000, 20000),
RealParameter("initial recycling capacity under construction",
5000, 20000),
RealParameter("initial recycling infrastructure", 5000, 20000),
# order of delay
CategoricalParameter("order in goods delay", (1, 4, 10, 1000)),
CategoricalParameter("order recycling capacity delay", (1, 4, 10)),
CategoricalParameter("order extraction capacity delay", (1, 4, 10)),
# uncertainties associated with lookups
RealParameter("lookup shortage loc", 20, 50),
RealParameter("lookup shortage speed", 1, 5),
RealParameter("lookup price substitute speed", 0.1, 0.5),
RealParameter("lookup price substitute begin", 3, 7),
RealParameter("lookup price substitute end", 15, 25),
RealParameter("lookup returns to scale speed", 0.01, 0.2),
RealParameter("lookup returns to scale scale", 0.3, 0.7),
RealParameter("lookup approximated learning speed", 0.01, 0.2),
RealParameter("lookup approximated learning scale", 0.3, 0.6),
RealParameter("lookup approximated learning start", 30, 60)]
results = perform_experiments(model, 50)
|
py | 1a4bc802dd7f4b178f9efa0f89eb215ea5a99f1f | from typing import List
class Solution:
def maxProfit(self, prices: List[int], fee: int) -> int:
# sold[i] selling at day i or do nothing
# sold[i] = max( sold[i-1], hold[i-1] + prices[i] - fee)
# hold[i] buying at day i or do nothing
# hold[i] = max( hold[i-1], sold[i-1] - prices[i])
N = len(prices)
sold = [0] * N
hold = [0] * N
sold[0] = 0
hold[0] = -prices[0]
for i in range(1, N):
sold[i] = max( sold[i-1], hold[i-1] + prices[i] - fee)
hold[i] = max( hold[i-1], sold[i-1] - prices[i])
return sold[N-1]
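# A quick sanity check (illustrative, not part of the original snippet): for the classic
# example prices=[1, 3, 2, 8, 4, 9] with fee=2, the optimal strategy buys at 1, sells at 8,
# buys at 4 and sells at 9, for a profit of ((8-1)-2) + ((9-4)-2) = 8.
if __name__ == '__main__':
    print(Solution().maxProfit([1, 3, 2, 8, 4, 9], fee=2))  # -> 8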
|
py | 1a4bc844da76865d23d69ffe6fd310fc74021abf | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tensorflow as tf
from object_detection import eval_util
from object_detection import inputs
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# A map of names to methods that help build the model.
MODEL_BUILD_UTIL_MAP = {
'get_configs_from_pipeline_file':
config_util.get_configs_from_pipeline_file,
'create_pipeline_proto_from_configs':
config_util.create_pipeline_proto_from_configs,
'merge_external_params_with_configs':
config_util.merge_external_params_with_configs,
'create_train_input_fn': inputs.create_train_input_fn,
'create_eval_input_fn': inputs.create_eval_input_fn,
'create_predict_input_fn': inputs.create_predict_input_fn,
}
def _get_groundtruth_data(detection_model, class_agnostic):
"""Extracts groundtruth data from detection_model.
Args:
detection_model: A `DetectionModel` object.
class_agnostic: Whether the detections are class_agnostic.
Returns:
A tuple of:
groundtruth: Dictionary with the following fields:
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized coordinates.
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
'groundtruth_masks': 3D float32 tensor of instance masks (if provided in
groundtruth)
class_agnostic: Boolean indicating whether detections are class agnostic.
"""
input_data_fields = fields.InputDataFields()
groundtruth_boxes = detection_model.groundtruth_lists(
fields.BoxListFields.boxes)[0]
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
if class_agnostic:
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
groundtruth_classes_one_hot = tf.ones([groundtruth_boxes_shape[0], 1])
else:
groundtruth_classes_one_hot = detection_model.groundtruth_lists(
fields.BoxListFields.classes)[0]
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=1) + label_id_offset)
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes
}
if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
groundtruth[input_data_fields.groundtruth_instance_masks] = (
detection_model.groundtruth_lists(fields.BoxListFields.masks)[0])
return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
"""Unstacks all tensors in `tensor_dict` along 0th dimension.
Unstacks tensor from the tensor dict along 0th dimension and returns a
tensor_dict containing values that are lists of unstacked tensors.
Tensors in the `tensor_dict` are expected to be of one of the three shapes:
1. [batch_size]
2. [batch_size, height, width, channels]
3. [batch_size, num_boxes, d1, d2, ... dn]
When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
above are sliced along the `num_boxes` dimension using the value in tensor
field.InputDataFields.num_groundtruth_boxes.
Note that this function has a static list of input data fields and has to be
kept in sync with the InputDataFields defined in core/standard_fields.py
Args:
tensor_dict: A dictionary of batched groundtruth tensors.
unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
dimension of the groundtruth tensors.
Returns:
A dictionary where the keys are from fields.InputDataFields and values are
a list of unstacked (optionally unpadded) tensors.
Raises:
ValueError: If unpad_tensors is True and `tensor_dict` does not contain
`num_groundtruth_boxes` tensor.
"""
unbatched_tensor_dict = {key: tf.unstack(tensor)
for key, tensor in tensor_dict.items()}
if unpad_groundtruth_tensors:
if (fields.InputDataFields.num_groundtruth_boxes not in
unbatched_tensor_dict):
raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
'Keys available: {}'.format(
unbatched_tensor_dict.keys()))
unbatched_unpadded_tensor_dict = {}
unpad_keys = set([
# List of input data fields that are padded along the num_boxes
# dimension. This list has to be kept in sync with InputDataFields in
# standard_fields.py.
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_weights
]).intersection(set(unbatched_tensor_dict.keys()))
for key in unpad_keys:
unpadded_tensor_list = []
for num_gt, padded_tensor in zip(
unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
unbatched_tensor_dict[key]):
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
padded_tensor)
slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
slice_size = tf.stack(
[num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
unpadded_tensor_list.append(unpadded_tensor)
unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
return unbatched_tensor_dict
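# For illustration (comments only): given a batch of two images padded to three boxes each,
#     tensor_dict = {
#         fields.InputDataFields.num_groundtruth_boxes: tf.constant([2, 1]),
#         fields.InputDataFields.groundtruth_boxes:     tf.zeros([2, 3, 4]),
#         fields.InputDataFields.groundtruth_classes:   tf.zeros([2, 3], tf.int64),
#     }
# unstack_batch(tensor_dict) returns lists of per-image tensors in which groundtruth_boxes
# has shapes [2, 4] and [1, 4], i.e. the padding rows have been sliced off.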
def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):
"""Creates a model function for `Estimator`.
Args:
detection_model_fn: Function that returns a `DetectionModel` instance.
configs: Dictionary of pipeline config objects.
hparams: `HParams` object.
use_tpu: Boolean indicating whether model should be constructed for
use on TPU.
Returns:
`model_fn` for `Estimator`.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
def model_fn(features, labels, mode, params=None):
"""Constructs the object detection model.
Args:
features: Dictionary of feature tensors, returned from `input_fn`.
labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
otherwise None.
mode: Mode key from tf.estimator.ModeKeys.
params: Parameter dictionary passed from the estimator.
Returns:
An `EstimatorSpec` that encapsulates the model and its serving
configurations.
"""
params = params or {}
total_loss, train_op, detections, export_outputs = None, None, None, None
is_training = mode == tf.estimator.ModeKeys.TRAIN
detection_model = detection_model_fn(is_training=is_training,
add_summaries=(not use_tpu))
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
labels = unstack_batch(
labels,
unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
elif mode == tf.estimator.ModeKeys.EVAL:
# For evaling on train data, it is necessary to check whether groundtruth
# must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape()
.as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None
labels = unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
gt_masks_list = None
if fields.InputDataFields.groundtruth_instance_masks in labels:
gt_masks_list = labels[
fields.InputDataFields.groundtruth_instance_masks]
gt_keypoints_list = None
if fields.InputDataFields.groundtruth_keypoints in labels:
gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
detection_model.provide_groundtruth(
groundtruth_boxes_list=gt_boxes_list,
groundtruth_classes_list=gt_classes_list,
groundtruth_masks_list=gt_masks_list,
groundtruth_keypoints_list=gt_keypoints_list,
groundtruth_weights_list=labels[
fields.InputDataFields.groundtruth_weights])
preprocessed_images = features[fields.InputDataFields.image]
prediction_dict = detection_model.predict(
preprocessed_images, features[fields.InputDataFields.true_image_shape])
detections = detection_model.postprocess(
prediction_dict, features[fields.InputDataFields.true_image_shape])
if mode == tf.estimator.ModeKeys.TRAIN:
if train_config.fine_tune_checkpoint and hparams.load_pretrained:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
asg_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map, train_config.fine_tune_checkpoint,
include_global_step=False))
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
losses_dict = detection_model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if train_config.add_regularization_loss:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if regularization_losses:
regularization_loss = tf.add_n(regularization_losses,
name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
# TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
# can write learning rate summaries on TPU without host calls.
global_step = tf.train.get_or_create_global_step()
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
if mode == tf.estimator.ModeKeys.TRAIN:
if use_tpu:
training_optimizer = tf.contrib.tpu.CrossShardOptimizer(
training_optimizer)
# Optionally freeze some layers by setting their gradients to be zero.
trainable_variables = None
if train_config.freeze_variables:
trainable_variables = tf.contrib.framework.filter_variables(
tf.trainable_variables(),
exclude_patterns=train_config.freeze_variables)
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
if not use_tpu:
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
summaries = [] if use_tpu else None
train_op = tf.contrib.layers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=training_optimizer,
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
if mode == tf.estimator.ModeKeys.PREDICT:
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf.estimator.export.PredictOutput(detections)
}
eval_metric_ops = None
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
class_agnostic = (fields.DetectionResultFields.detection_classes
not in detections)
groundtruth = _get_groundtruth_data(detection_model, class_agnostic)
use_original_images = fields.InputDataFields.original_image in features
original_images = (
features[fields.InputDataFields.original_image] if use_original_images
else features[fields.InputDataFields.image])
eval_dict = eval_util.result_dict_for_single_example(
original_images[0:1],
features[inputs.HASH_KEY][0],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=False)
if class_agnostic:
category_index = label_map_util.create_class_agnostic_category_index()
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
img_summary = None
if not use_tpu and use_original_images:
detection_and_groundtruth = (
vis_utils.draw_side_by_side_evaluation_image(
eval_dict, category_index, max_boxes_to_draw=20,
min_score_thresh=0.2))
img_summary = tf.summary.image('Detections_Left_Groundtruth_Right',
detection_and_groundtruth)
if mode == tf.estimator.ModeKeys.EVAL:
# Eval metrics on a single example.
eval_metrics = eval_config.metrics_set
if not eval_metrics:
eval_metrics = ['coco_detection_metrics']
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_metrics, category_index.values(), eval_dict,
include_metrics_per_category=False)
        for loss_key, loss_tensor in losses_dict.items():
eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
for var in optimizer_summary_vars:
eval_metric_ops[var.op.name] = (var, tf.no_op())
if img_summary is not None:
eval_metric_ops['Detections_Left_Groundtruth_Right'] = (
img_summary, tf.no_op())
        eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}
if use_tpu:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
scaffold_fn=scaffold_fn,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metric_ops,
export_outputs=export_outputs)
else:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs)
return model_fn
def create_estimator_and_inputs(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
use_tpu_estimator=False,
use_tpu=False,
num_shards=1,
params=None,
**kwargs):
"""Creates `Estimator`, input functions, and steps.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
an `Estimator` will be returned.
use_tpu: Boolean, whether training and evaluation should run on TPU. Only
used if `use_tpu_estimator` is True.
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
is True.
params: Parameter dictionary passed from the estimator. Only used if
`use_tpu_estimator` is True.
**kwargs: Additional keyword arguments for configuration override.
Returns:
A dictionary with the following fields:
'estimator': An `Estimator` or `TPUEstimator`.
'train_input_fn': A training input function.
'eval_input_fn': An evaluation input function.
'eval_on_train_input_fn': An evaluation-on-train input function.
'predict_input_fn': A prediction input function.
'train_steps': Number of training steps. Either directly from input or from
configuration.
'eval_steps': Number of evaluation steps. Either directly from input or from
configuration.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
configs = get_configs_from_pipeline_file(pipeline_config_path)
configs = merge_external_params_with_configs(
configs,
hparams,
train_steps=train_steps,
eval_steps=eval_steps,
**kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_config = configs['eval_input_config']
if train_steps is None:
train_steps = configs['train_config'].num_steps
if eval_steps is None:
eval_steps = configs['eval_config'].num_examples
detection_model_fn = functools.partial(
model_builder.build, model_config=model_config)
# Create the input functions for TRAIN/EVAL/PREDICT.
train_input_fn = create_train_input_fn(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config)
eval_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config)
eval_on_train_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=train_input_config,
model_config=model_config)
predict_input_fn = create_predict_input_fn(model_config=model_config)
model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu)
if use_tpu_estimator:
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
train_batch_size=train_config.batch_size,
# For each core, only batch size 1 is supported for eval.
eval_batch_size=num_shards * 1 if use_tpu else 1,
use_tpu=use_tpu,
config=run_config,
params=params if params else {})
else:
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
# Write the as-run pipeline config to disk.
if run_config.is_chief:
pipeline_config_final = create_pipeline_proto_from_configs(
configs)
config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
return dict(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
eval_on_train_input_fn=eval_on_train_input_fn,
predict_input_fn=predict_input_fn,
train_steps=train_steps,
eval_steps=eval_steps)
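# Illustrative usage sketch (not part of the original module): building a
# TPUEstimator-backed bundle with the helper above. It assumes `hparams` is the
# HParams object expected by merge_external_params_with_configs and that
# `tpu_run_config` is a tf.contrib.tpu.RunConfig; the function name is
# hypothetical.
def _example_create_tpu_estimator_and_inputs(tpu_run_config, hparams,
                                             pipeline_config_path):
  # Returns the same dictionary of estimator/input_fns/steps as the CPU/GPU
  # path, but wraps the model_fn in a TPUEstimator sharded over 8 cores.
  return create_estimator_and_inputs(
      tpu_run_config,
      hparams,
      pipeline_config_path,
      use_tpu_estimator=True,
      use_tpu=True,
      num_shards=8)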
def create_train_and_eval_specs(train_input_fn,
eval_input_fn,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_steps,
eval_on_train_data=False,
final_exporter_name='Servo',
eval_spec_name='eval'):
"""Creates a `TrainSpec` and `EvalSpec`s.
Args:
train_input_fn: Function that produces features and labels on train data.
eval_input_fn: Function that produces features and labels on eval data.
eval_on_train_input_fn: Function that produces features and labels for
evaluation on train data.
predict_input_fn: Function that produces features for inference.
train_steps: Number of training steps.
eval_steps: Number of eval steps.
eval_on_train_data: Whether to evaluate model on training data. Default is
False.
final_exporter_name: String name given to `FinalExporter`.
eval_spec_name: String name given to main `EvalSpec`.
Returns:
Tuple of `TrainSpec` and list of `EvalSpecs`. The first `EvalSpec` is for
evaluation data. If `eval_on_train_data` is True, the second `EvalSpec` in
the list will correspond to training data.
"""
exporter = tf.estimator.FinalExporter(
name=final_exporter_name, serving_input_receiver_fn=predict_input_fn)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=train_steps)
eval_specs = [
tf.estimator.EvalSpec(
name=eval_spec_name,
input_fn=eval_input_fn,
steps=eval_steps,
exporters=exporter)
]
if eval_on_train_data:
eval_specs.append(
tf.estimator.EvalSpec(
name='eval_on_train', input_fn=eval_on_train_input_fn,
steps=eval_steps))
return train_spec, eval_specs
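# Illustrative usage sketch (not part of the original module): wiring the two
# helpers above into tf.estimator.train_and_evaluate, roughly the way a main
# script would. `hparams` and `pipeline_config_path` are assumed to be valid
# inputs for create_estimator_and_inputs; the function name is hypothetical.
def _example_train_and_evaluate(hparams, pipeline_config_path, model_dir):
  run_config = tf.estimator.RunConfig(model_dir=model_dir)
  train_and_eval_dict = create_estimator_and_inputs(
      run_config, hparams, pipeline_config_path)
  train_spec, eval_specs = create_train_and_eval_specs(
      train_and_eval_dict['train_input_fn'],
      train_and_eval_dict['eval_input_fn'],
      train_and_eval_dict['eval_on_train_input_fn'],
      train_and_eval_dict['predict_input_fn'],
      train_and_eval_dict['train_steps'],
      train_and_eval_dict['eval_steps'])
  # Runs training and periodic evaluation (against the first EvalSpec) until
  # train_steps is reached.
  tf.estimator.train_and_evaluate(
      train_and_eval_dict['estimator'], train_spec, eval_specs[0])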
def continuous_eval(estimator, model_dir, input_fn, eval_steps, train_steps,
name):
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
eval_steps: Number of steps to run during each evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
"""
def terminate_eval():
tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
return True
for ckpt in tf.contrib.training.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
tf.logging.info('Starting Evaluation.')
try:
eval_results = estimator.evaluate(
input_fn=input_fn,
steps=eval_steps,
checkpoint_path=ckpt,
name=name)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
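# Illustrative usage sketch (not part of the original module): running the
# evaluation loop above against checkpoints produced by a separate training
# job. `train_and_eval_dict` is assumed to come from
# create_estimator_and_inputs(); the function name is hypothetical.
def _example_continuous_eval(train_and_eval_dict, model_dir):
  # Blocks, evaluating every new checkpoint written to model_dir, and returns
  # once a checkpoint at or beyond train_steps has been evaluated.
  continuous_eval(
      estimator=train_and_eval_dict['estimator'],
      model_dir=model_dir,
      input_fn=train_and_eval_dict['eval_input_fn'],
      eval_steps=train_and_eval_dict['eval_steps'],
      train_steps=train_and_eval_dict['train_steps'],
      name='validation_data')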
def populate_experiment(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
**kwargs):
"""Populates an `Experiment` object.
EXPERIMENT CLASS IS DEPRECATED. Please switch to
tf.estimator.train_and_evaluate. As an example, see model_main.py.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
An `Experiment` that defines all aspects of training, evaluation, and
export.
"""
tf.logging.warning('Experiment is being deprecated. Please use '
'tf.estimator.train_and_evaluate(). See model_main.py for '
'an example.')
train_and_eval_dict = create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
eval_steps=eval_steps,
model_fn_creator=model_fn_creator,
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fn = train_and_eval_dict['eval_input_fn']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
eval_steps = train_and_eval_dict['eval_steps']
export_strategies = [
tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
serving_input_fn=predict_input_fn)
]
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=train_steps,
eval_steps=eval_steps,
export_strategies=export_strategies,
eval_delay_secs=120,)
|
py | 1a4bca413e7935c59087fda972fba889d601f457 | '''
Runs the whole process in one file for a .csv positional data file (time, x, y, z)
and generates the final set of keplerian elements along with a plot and a filtered.csv data file
'''
from util import (read_data, kep_state, rkf78, golay_window)
from filters import (sav_golay, triple_moving_average)
from kep_determination import (lamberts_kalman, interpolation, ellipse_fit, gibbsMethod)
import argparse
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection used by fig.gca(projection='3d')
from propagation import sgp4
import inquirer
def process(data_file, error_apriori, units):
'''
    Given a .csv data file in the format of (time, x, y, z), applies the selected filter(s), generates a
    filtered.csv data file, prints the final keplerian elements computed by the chosen orbit-determination
    method(s) and finally plots the initial data set, the filtered data set and the final orbit.
    Args:
        data_file (string): The name of the .csv file containing the positional data
        error_apriori (float): apriori estimation of the measurements error in km
        units (string): 'm' if the input positions are in metres (they are converted to km); any other
            value means the positions are already in km
    Returns:
        None. Runs the whole process of the program, printing the results and optionally plotting them
'''
# First read the csv file called "orbit" with the positional data
data = read_data.load_data(data_file)
if(units == 'm'):
# Transform m to km
data[:, 1:4] = data[:, 1:4] / 1000
print("***********Choose filter(s) in desired order of application***********")
print("(SPACE to toggle, UP/DOWN to navigate, RIGHT/LEFT to select/deselect and ENTER to submit)")
print("*if nothing is selected, Triple Moving Average followed by Savitzky Golay will be applied")
questions = [
inquirer.Checkbox('filter',
message="Select filter(s)",
choices=['Savitzky Golay Filter', 'Triple Moving Average Filter'],
),
]
choices = inquirer.prompt(questions)
data_after_filter = data
if(len(choices['filter']) == 0):
print("Applying Triple Moving Average followed by Savitzky Golay...")
# Apply the Triple moving average filter with window = 3
data_after_filter = triple_moving_average.generate_filtered_data(data_after_filter, 3)
# Use the golay_window.py script to find the window for the Savitzky Golay filter based on the error you input
window = golay_window.window(error_apriori, data_after_filter)
# Apply the Savitzky Golay filter with window = window (51 for orbit.csv) and polynomial order = 3
data_after_filter = sav_golay.golay(data_after_filter, window, 3)
else:
for index, choice in enumerate(choices['filter']):
if(choice == 'Savitzky Golay Filter'):
print("Applying Savitzky Golay Filter...")
# Use the golay_window.py script to find the window for the Savitzky Golay filter based on the error you input
window = golay_window.window(error_apriori, data_after_filter)
# Apply the Savitzky Golay filter with window = window (51 for orbit.csv) and polynomial order = 3
data_after_filter = sav_golay.golay(data_after_filter, window, 3)
else:
print("Applying Triple Moving Average Filter...")
# Apply the Triple moving average filter with window = 3
data_after_filter = triple_moving_average.generate_filtered_data(data_after_filter, 3)
# Compute the residuals between filtered data and initial data and then the sum and mean values of each axis
res = data_after_filter[:, 1:4] - data[:, 1:4]
sums = np.sum(res, axis=0)
print("\nDisplaying the sum of the residuals for each axis")
print(sums, "\n")
means = np.mean(res, axis=0)
print("Displaying the mean of the residuals for each axis")
print(means, "\n")
# Save the filtered data into a new csv called "filtered"
np.savetxt("filtered.csv", data_after_filter, delimiter=",")
print("***********Choose Method(s) for Orbit Determination***********")
print("(SPACE to toggle, UP/DOWN to navigate, RIGHT/LEFT to select/deselect and ENTER to submit)")
print("*if nothing is selected, Cubic Spline Interpolation will be used for Orbit Determination")
questions = [
inquirer.Checkbox('method',
message="Select Method(s)",
choices=['Lamberts Kalman', 'Cubic Spline Interpolation', 'Ellipse Best Fit', 'Gibbs 3 Vector'],
),
]
choices = inquirer.prompt(questions)
kep_elements = {}
if(len(choices['method']) == 0):
# Apply the interpolation method
kep_inter = interpolation.main(data_after_filter)
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_inter = lamberts_kalman.kalman(kep_inter, 0.01 ** 2)
kep_final_inter = np.transpose(kep_final_inter)
kep_final_inter = np.resize(kep_final_inter, ((7, 1)))
kep_final_inter[6, 0] = sgp4.rev_per_day(kep_final_inter[0, 0])
kep_elements['Cubic Spline Interpolation'] = kep_final_inter
else:
for index, choice in enumerate(choices['method']):
if(choice == 'Lamberts Kalman'):
# Apply Lambert Kalman method for the filtered data set
kep_lamb = lamberts_kalman.create_kep(data_after_filter)
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_lamb = lamberts_kalman.kalman(kep_lamb, 0.01 ** 2)
kep_final_lamb = np.transpose(kep_final_lamb)
kep_final_lamb = np.resize(kep_final_lamb, ((7, 1)))
kep_final_lamb[6, 0] = sgp4.rev_per_day(kep_final_lamb[0, 0])
kep_elements['Lamberts Kalman'] = kep_final_lamb
elif(choice == 'Cubic Spline Interpolation'):
# Apply the interpolation method
kep_inter = interpolation.main(data_after_filter)
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_inter = lamberts_kalman.kalman(kep_inter, 0.01 ** 2)
kep_final_inter = np.transpose(kep_final_inter)
kep_final_inter = np.resize(kep_final_inter, ((7, 1)))
kep_final_inter[6, 0] = sgp4.rev_per_day(kep_final_inter[0, 0])
kep_elements['Cubic Spline Interpolation'] = kep_final_inter
elif(choice == 'Ellipse Best Fit'):
# Apply the ellipse best fit method
kep_ellip = ellipse_fit.determine_kep(data_after_filter[:, 1:])[0]
kep_final_ellip = np.transpose(kep_ellip)
kep_final_ellip = np.resize(kep_final_ellip, ((7, 1)))
kep_final_ellip[6, 0] = sgp4.rev_per_day(kep_final_ellip[0, 0])
kep_elements['Ellipse Best Fit'] = kep_final_ellip
else:
# Apply the Gibbs method
kep_gibbs = gibbsMethod.gibbs_get_kep(data_after_filter[:,1:])
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_gibbs = lamberts_kalman.kalman(kep_gibbs, 0.01 ** 2)
kep_final_gibbs = np.transpose(kep_final_gibbs)
kep_final_gibbs = np.resize(kep_final_gibbs, ((7, 1)))
kep_final_gibbs[6, 0] = sgp4.rev_per_day(kep_final_gibbs[0, 0])
kep_elements['Gibbs 3 Vector'] = kep_final_gibbs
kep_final = np.zeros((7, len(kep_elements)))
order = []
for index, key in enumerate(kep_elements):
kep_final[:, index] = np.ravel(kep_elements[key])
order.append(str(key))
# Print the final orbital elements for all solutions
kep_elements = ["Semi major axis (a)(km)", "Eccentricity (e)", "Inclination (i)(deg)", "Argument of perigee (ω)(deg)", "Right acension of ascending node (Ω)(deg)", "True anomaly (v)(deg)", "Frequency (f)(rev/day)"]
for i in range(0, len(order)):
print("\n******************Output for %s Method******************\n" % order[i])
for j in range(0, 7):
print("%s: %.16f" % (kep_elements[j], kep_final[j, i]))
print("\nShow plots? [y/n]")
user_input = input()
if(user_input == "y" or user_input == "Y"):
for j in range(0, len(order)):
# Plot the initial data set, the filtered data set and the final orbit
# First we transform the set of keplerian elements into a state vector
state = kep_state.kep_state(np.resize(kep_final[:, j], (7, 1)))
            # Then we produce more state vectors at various times using a Runge Kutta algorithm
keep_state = np.zeros((6, 150))
ti = 0.0
tf = 1.0
t_hold = np.zeros((150, 1))
x = state
h = 0.1
tetol = 1e-04
for i in range(0, 150):
keep_state[:, i] = np.ravel(rkf78.rkf78(6, ti, tf, h, tetol, x))
t_hold[i, 0] = tf
tf = tf + 1
positions = keep_state[0:3, :]
## Finally we plot the graph
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(data[:, 1], data[:, 2], data[:, 3], ".", label='Initial data ')
ax.plot(data_after_filter[:, 1], data_after_filter[:, 2], data_after_filter[:, 3], "k", linestyle='-',
label='Filtered data')
ax.plot(positions[0, :], positions[1, :], positions[2, :], "r-", label='Orbit after %s method' % order[j])
ax.legend()
ax.can_zoom()
ax.set_xlabel('x (km)')
ax.set_ylabel('y (km)')
ax.set_zlabel('z (km)')
plt.show()
def read_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file_path', type=str, help="path to .csv data file", default='orbit.csv')
parser.add_argument('-e', '--error', type=float, help="estimation of the measurement error", default=10.0)
parser.add_argument('-u', '--units', type=str, help="m for metres, k for kilometres", default='k')
return parser.parse_args()
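# Illustrative sketch (not part of the original script): the same filtering
# pipeline that process() applies by default, but without the interactive
# prompts, using only the helpers already imported above. The function name
# and the 'orbit.csv' default are hypothetical.
def _example_noninteractive_filter(data_file='orbit.csv', error_apriori=10.0):
    # Load the (time, x, y, z) data, apply the Triple Moving Average filter
    # with window 3, then the Savitzky Golay filter with a window derived from
    # the a-priori error estimate, and return the filtered array.
    data = read_data.load_data(data_file)
    filtered = triple_moving_average.generate_filtered_data(data, 3)
    window = golay_window.window(error_apriori, filtered)
    filtered = sav_golay.golay(filtered, window, 3)
    return filtered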
if __name__ == "__main__":
print("\n************Welcome To OrbitDeterminator************\n")
print("Workflow for OrbitDeterminator is as follows:")
workflow = " ----------- ----------------------\n"\
"Positional data--->| Filters |--->| Keplerian elements |--->Determined Orbit\n"\
" | | | Determination |\n"\
" ----------- ----------------------\n\n"\
"Available filters: | Available methods for orbit determination:\n"\
" 1. Savitzky Golay Filter | 1. Lamberts Kalman\n"\
" 2. Triple Moving Average Filter| 2. Cubic spline interpolation\n"\
" | 3. Ellipse Bset Fit\n"\
" | 4. Gibbs 3 Vector\n"
print("\n" + workflow)
args = read_args()
process(args.file_path, args.error, args.units)
|
py | 1a4bcb013417e5cc9bf8d759f6cfd07c504f1fbe | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The MicroBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that we reject low difficulty headers to prevent our block tree from filling up with useless bloat"""
from test_framework.messages import (
CBlockHeader,
from_hex,
)
from test_framework.p2p import (
P2PInterface,
msg_headers,
)
from test_framework.test_framework import MicroBitcoinTestFramework
import os
class RejectLowDifficultyHeadersTest(MicroBitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.chain = 'testnet3' # Use testnet chain because it has an early checkpoint
self.num_nodes = 2
def add_options(self, parser):
parser.add_argument(
'--datafile',
default='data/blockheader_testnet3.hex',
help='Test data file (default: %(default)s)',
)
def run_test(self):
self.log.info("Read headers data")
self.headers_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.options.datafile)
with open(self.headers_file_path, encoding='utf-8') as headers_data:
h_lines = [l.strip() for l in headers_data.readlines()]
# The headers data is taken from testnet3 for early blocks from genesis until the first checkpoint. There are
# two headers with valid POW at height 1 and 2, forking off from genesis. They are indicated by the FORK_PREFIX.
FORK_PREFIX = 'fork:'
self.headers = [l for l in h_lines if not l.startswith(FORK_PREFIX)]
self.headers_fork = [l[len(FORK_PREFIX):] for l in h_lines if l.startswith(FORK_PREFIX)]
self.headers = [from_hex(CBlockHeader(), h) for h in self.headers]
self.headers_fork = [from_hex(CBlockHeader(), h) for h in self.headers_fork]
self.log.info("Feed all non-fork headers, including and up to the first checkpoint")
peer_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface())
peer_checkpoint.send_and_ping(msg_headers(self.headers))
assert {
'height': 546,
'hash': '000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70',
'branchlen': 546,
'status': 'headers-only',
} in self.nodes[0].getchaintips()
self.log.info("Feed all fork headers (fails due to checkpoint)")
with self.nodes[0].assert_debug_log(['bad-fork-prior-to-checkpoint']):
peer_checkpoint.send_message(msg_headers(self.headers_fork))
peer_checkpoint.wait_for_disconnect()
self.log.info("Feed all fork headers (succeeds without checkpoint)")
# On node 0 it succeeds because checkpoints are disabled
self.restart_node(0, extra_args=['-nocheckpoints'])
peer_no_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface())
peer_no_checkpoint.send_and_ping(msg_headers(self.headers_fork))
assert {
"height": 2,
"hash": "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
"branchlen": 2,
"status": "headers-only",
} in self.nodes[0].getchaintips()
# On node 1 it succeeds because no checkpoint has been reached yet by a chain tip
peer_before_checkpoint = self.nodes[1].add_p2p_connection(P2PInterface())
peer_before_checkpoint.send_and_ping(msg_headers(self.headers_fork))
assert {
"height": 2,
"hash": "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
"branchlen": 2,
"status": "headers-only",
} in self.nodes[1].getchaintips()
if __name__ == '__main__':
RejectLowDifficultyHeadersTest().main()
|
py | 1a4bcb3e35cf4332739df78ea89c8308d33e612b | r"""
Generate interpreters for fast_callable
AUTHORS:
- Carl Witty
This file is part of the Sage support for "planned" computations;
that is, computations that are separated into a planning stage and
a plan-execution stage. Here, we generate fast interpreters for plan
executions.
There are at least two kinds of computations that are often planned in
this fashion. First is arithmetic expression evaluation, where we
take an arbitrary user-specified arithmetic expression and compile it
into a bytecode form for fast interpretation. Second is things like
FFTs and large multiplications, where large problems are split into
multiple smaller problems... we can do the logical "splitting" for a
given size only once, producing a plan which can be reused as often as
we want for different problems of the same size. Currently only
arithmetic expression evaluation is implemented, but other kinds of
planned computations should be easy to add.
Typically, for arithmetic expressions, we want the storage of
intermediate results to be handled automatically (on a stack); for
FFTs/multiplications/etc., the planner will keep track of intermediate
results itself.
For arithmetic expression evaluation, we want to have lots of
interpreters (at least one, and possibly several, per
specially-handled type). Also, for any given type, we have many
possible variants of instruction encoding, etc.; some of these could
be handled with conditional compilation, but some are more
complicated. So we end up writing an interpreter generator.
We want to share as much code as possible across all of these
interpreters, while still maintaining the freedom to make drastic
changes in the interpretation strategy (which may change the
generated code, the calling convention for the interpreter, etc.)
To make this work, the interpreter back-end is divided into three
parts:
1. The interpreter itself, in C or C++.
2. The wrapper, which is a Cython object holding the
constants, code, etc., and which actually calls the interpreter.
3. The code generator.
We generate parts 1 and 2. The code generator is table-driven,
and we generate the tables for the code generator.
There are a lot of techniques for fast interpreters that we do not yet
use; hopefully at least some of these will eventually be implemented:
- using gcc's "labels as values" extension where available
- top-of-stack caching
- superinstructions and/or superoperators
- static stack caching
- context threading/subroutine threading
- selective inlining/dynamic superinstructions
- automatic replication
Interpreters may be stack-based or register-based. Recent research
suggests that register-based interpreters are better, but the
researchers are investigating interpreters for entire programming
languages, rather than interpreters for expressions. I suspect
that stack-based expression interpreters may be better. However,
we'll implement both varieties and see what's best.
The relative costs of stack- and register-based interpreters will
depend on the costs of moving values. For complicated types (like
mpz_t), a register-based interpreter will quite likely be better,
since it will avoid moving values.
We will NOT support any sort of storage of bytecode; instead, the
code must be re-generated from expression trees in every Sage run.
This means that we can trivially experiment with different styles of
interpreter, or even use quite different interpreters depending on
the architecture, without having to worry about forward and backward
compatibility.
"""
#*****************************************************************************
# Copyright (C) 2009 Carl Witty <[email protected]>
# Copyright (C) 2015 Jeroen Demeyer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
import os
import re
import six
from jinja2 import Environment
from jinja2.runtime import StrictUndefined
from collections import defaultdict
from distutils.extension import Extension
##############################
# This module is used during the Sage build process, so it should not
# use any other Sage modules. (In particular, it MUST NOT use any
# Cython modules -- they won't be built yet!)
# Also, we have some trivial dependency tracking, where we don't
# rebuild the interpreters if this file hasn't changed; if
# interpreter configuration is split out into a separate file,
# that will have to be changed.
##############################
# We share a single jinja2 environment among all templating in this
# file. We use trim_blocks=True (which means that we ignore white
# space after "%}" jinja2 command endings), and set undefined to
# complain if we use an undefined variable.
jinja_env = Environment(trim_blocks=True, undefined=StrictUndefined)
# Allow 'i' as a shorter alias for the built-in 'indent' filter.
jinja_env.filters['i'] = jinja_env.filters['indent']
autogen_warn = "Automatically generated by {}. Do not edit!".format(__file__)
def indent_lines(n, text):
r"""
INPUTS:
- n -- indentation amount
- text -- text to indent
Indents each line in text by n spaces.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import indent_lines
sage: indent_lines(3, "foo")
' foo'
sage: indent_lines(3, "foo\nbar")
' foo\n bar'
sage: indent_lines(3, "foo\nbar\n")
' foo\n bar\n'
"""
lines = text.splitlines(True)
spaces = ' ' * n
return ''.join(spaces + line for line in lines)
def je(template, **kwargs):
r"""
A convenience method for creating strings with Jinja templates.
The name je stands for "Jinja evaluate".
The first argument is the template string; remaining keyword
arguments define Jinja variables.
If the first character in the template string is a newline, it is
removed (this feature is useful when using multi-line templates defined
with triple-quoted strings -- the first line doesn't have to be on
the same line as the quotes, which would screw up the indentation).
(This is very inefficient, because it recompiles the Jinja
template on each call; don't use it in situations where
performance is important.)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import je
sage: je("{{ a }} > {{ b }} * {{ c }}", a='"a suffusion of yellow"', b=3, c=7)
u'"a suffusion of yellow" > 3 * 7'
"""
if len(template) > 0 and template[0] == '\n':
template = template[1:]
# It looks like Jinja2 automatically removes one trailing newline?
if len(template) > 0 and template[-1] == '\n':
template = template + '\n'
tmpl = jinja_env.from_string(template)
return tmpl.render(kwargs)
class StorageType(object):
r"""
A StorageType specifies the C types used to deal with values of a
given type.
We currently support three categories of types.
First are the "simple" types. These are types where: the
representation is small, functions expect arguments to be passed
by value, and the C/C++ assignment operator works. This would
include built-in C types (long, float, etc.) and small structs
(like gsl_complex).
Second is 'PyObject*'. This is just like a simple type, except
that we have to incref/decref at appropriate places.
Third is "auto-reference" types. This is how
GMP/MPIR/MPFR/MPFI/FLINT types work. For these types, functions
expect arguments to be passed by reference, and the C assignment
operator does not do what we want. In addition, they take
advantage of a quirk in C (where arrays are automatically
converted to pointers) to automatically pass arguments by
reference.
Support for further categories would not be difficult to add (such
as reference-counted types other than PyObject*, or
pass-by-reference types that don't use the GMP auto-reference
trick), if we ever run across a use for them.
"""
def __init__(self):
r"""
Initialize an instance of StorageType.
This sets several properties:
class_member_declarations:
A string giving variable declarations that must be members of any
wrapper class using this type.
class_member_initializations:
A string initializing the class_member_declarations; will be
inserted into the __init__ method of any wrapper class using this
type.
local_declarations:
A string giving variable declarations that must be local variables
in Cython methods using this storage type.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.class_member_declarations
''
sage: ty_double.class_member_initializations
''
sage: ty_double.local_declarations
''
sage: ty_mpfr.class_member_declarations
'cdef RealField_class domain\n'
sage: ty_mpfr.class_member_initializations
"self.domain = args['domain']\n"
sage: ty_mpfr.local_declarations
'cdef RealNumber rn\n'
"""
self.class_member_declarations = ''
self.class_member_initializations = ''
self.local_declarations = ''
def cheap_copies(self):
r"""
Returns True or False, depending on whether this StorageType
supports cheap copies -- whether it is cheap to copy values of
this type from one location to another. This is true for
primitive types, and for types like PyObject* (where you're only
copying a pointer, and possibly changing some reference counts).
It is false for types like mpz_t and mpfr_t, where copying values
can involve arbitrarily much work (including memory allocation).
The practical effect is that if cheap_copies is True,
instructions with outputs of this type write the results into
local variables, and the results are then copied to their
final locations. If cheap_copies is False, then the addresses
of output locations are passed into the instruction and the
instruction writes outputs directly in the final location.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.cheap_copies()
True
sage: ty_python.cheap_copies()
True
sage: ty_mpfr.cheap_copies()
False
"""
return False
def python_refcounted(self):
r"""
Says whether this storage type is a Python type, so we need to
use INCREF/DECREF.
(If we needed to support any non-Python refcounted types, it
might be better to make this object-oriented and have methods
like "generate an incref" and "generate a decref". But as
long as we only support Python, this way is probably simpler.)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.python_refcounted()
False
sage: ty_python.python_refcounted()
True
"""
return False
def cython_decl_type(self):
r"""
Give the Cython type for a single value of this type (as a string).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.cython_decl_type()
'double'
sage: ty_python.cython_decl_type()
'object'
sage: ty_mpfr.cython_decl_type()
'mpfr_t'
"""
return self.c_decl_type()
def cython_array_type(self):
r"""
Give the Cython type for referring to an array of values of
this type (as a string).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.cython_array_type()
'double*'
sage: ty_python.cython_array_type()
'PyObject**'
sage: ty_mpfr.cython_array_type()
'mpfr_t*'
"""
return self.c_ptr_type()
def needs_cython_init_clear(self):
r"""
Says whether values/arrays of this type need to be initialized
before use and cleared before the underlying memory is freed.
(We could remove this method, always call .cython_init() to
generate initialization code, and just let .cython_init()
generate empty code if no initialization is required; that would
generate empty loops, which are ugly and potentially might not
be optimized away.)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.needs_cython_init_clear()
False
sage: ty_mpfr.needs_cython_init_clear()
True
sage: ty_python.needs_cython_init_clear()
True
"""
return False
def c_decl_type(self):
r"""
Give the C type for a single value of this type (as a string).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.c_decl_type()
'double'
sage: ty_python.c_decl_type()
'PyObject*'
sage: ty_mpfr.c_decl_type()
'mpfr_t'
"""
raise NotImplementedError
def c_ptr_type(self):
r"""
Give the C type for a pointer to this type (as a reference to
either a single value or an array) (as a string).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.c_ptr_type()
'double*'
sage: ty_python.c_ptr_type()
'PyObject**'
sage: ty_mpfr.c_ptr_type()
'mpfr_t*'
"""
return self.c_decl_type() + '*'
def c_reference_type(self):
r"""
Give the C type which should be used for passing a reference
to a single value in a call. This is used as the type for the
return value.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.c_reference_type()
'double*'
sage: ty_python.c_reference_type()
'PyObject**'
"""
return self.c_ptr_type()
def c_local_type(self):
r"""
Give the C type used for a value of this type inside an
instruction. For assignable/cheap_copy types, this is the
same as c_decl_type; for auto-reference types, this is the
pointer type.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.c_local_type()
'double'
sage: ty_python.c_local_type()
'PyObject*'
sage: ty_mpfr.c_local_type()
'mpfr_ptr'
"""
raise NotImplementedError
def assign_c_from_py(self, c, py):
r"""
Given a Cython variable/array reference/etc. of this storage type,
and a Python expression, generate code to assign to the Cython
variable from the Python expression.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.assign_c_from_py('foo', 'bar')
u'foo = bar'
sage: ty_python.assign_c_from_py('foo[i]', 'bar[j]')
u'foo[i] = <PyObject *>bar[j]; Py_INCREF(foo[i])'
sage: ty_mpfr.assign_c_from_py('foo', 'bar')
u'rn = self.domain(bar)\nmpfr_set(foo, rn.value, MPFR_RNDN)'
"""
return je("{{ c }} = {{ py }}", c=c, py=py)
def declare_chunk_class_members(self, name):
r"""
Return a string giving the declarations of the class members
in a wrapper class for a memory chunk with this storage type
and the given name.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.declare_chunk_class_members('args')
u' cdef int _n_args\n cdef mpfr_t* _args\n'
"""
return je("""
{# XXX Variables here (and everywhere, really) should actually be Py_ssize_t #}
cdef int _n_{{ name }}
cdef {{ myself.cython_array_type() }} _{{ name }}
""", myself=self, name=name)
def alloc_chunk_data(self, name, len):
r"""
Return a string allocating the memory for the class members for
a memory chunk with this storage type and the given name.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: print(ty_mpfr.alloc_chunk_data('args', 'MY_LENGTH'))
self._n_args = MY_LENGTH
self._args = <mpfr_t*>sig_malloc(sizeof(mpfr_t) * MY_LENGTH)
if self._args == NULL: raise MemoryError
for i in range(MY_LENGTH):
mpfr_init2(self._args[i], self.domain.prec())
<BLANKLINE>
"""
return je("""
self._n_{{ name }} = {{ len }}
self._{{ name }} = <{{ myself.c_ptr_type() }}>sig_malloc(sizeof({{ myself.c_decl_type() }}) * {{ len }})
if self._{{ name }} == NULL: raise MemoryError
{% if myself.needs_cython_init_clear() %}
for i in range({{ len }}):
{{ myself.cython_init('self._%s[i]' % name) }}
{% endif %}
""", myself=self, name=name, len=len)
def dealloc_chunk_data(self, name):
r"""
Return a string to be put in the __dealloc__ method of a
wrapper class using a memory chunk with this storage type, to
deallocate the corresponding class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: print(ty_double.dealloc_chunk_data('args'))
if self._args:
sig_free(self._args)
<BLANKLINE>
sage: print(ty_mpfr.dealloc_chunk_data('constants'))
if self._constants:
for i in range(self._n_constants):
mpfr_clear(self._constants[i])
sig_free(self._constants)
<BLANKLINE>
"""
return je("""
if self._{{ name }}:
{% if myself.needs_cython_init_clear() %}
for i in range(self._n_{{ name }}):
{{ myself.cython_clear('self._%s[i]' % name) }}
{% endif %}
sig_free(self._{{ name }})
""", myself=self, name=name)
class StorageTypeAssignable(StorageType):
r"""
StorageTypeAssignable is a subtype of StorageType that deals with
types with cheap copies, like primitive types and PyObject*.
"""
def __init__(self, ty):
r"""
Initializes the property type (the C/Cython name for this type),
as well as the properties described in the documentation for
StorageType.__init__.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.class_member_declarations
''
sage: ty_double.class_member_initializations
''
sage: ty_double.local_declarations
''
sage: ty_double.type
'double'
sage: ty_python.type
'PyObject*'
"""
StorageType.__init__(self)
self.type = ty
def cheap_copies(self):
r"""
Returns True or False, depending on whether this StorageType
supports cheap copies -- whether it is cheap to copy values of
this type from one location to another. (See StorageType.cheap_copies
for more on this property.)
Since having cheap copies is essentially the definition of
StorageTypeAssignable, this always returns True.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.cheap_copies()
True
sage: ty_python.cheap_copies()
True
"""
return True
def c_decl_type(self):
r"""
Give the C type for a single value of this type (as a string).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.c_decl_type()
'double'
sage: ty_python.c_decl_type()
'PyObject*'
"""
return self.type
def c_local_type(self):
r"""
Give the C type used for a value of this type inside an
instruction. For assignable/cheap_copy types, this is the
same as c_decl_type; for auto-reference types, this is the
pointer type.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_double.c_local_type()
'double'
sage: ty_python.c_local_type()
'PyObject*'
"""
return self.type
class StorageTypeSimple(StorageTypeAssignable):
r"""
StorageTypeSimple is a subtype of StorageTypeAssignable that deals
with non-reference-counted types with cheap copies, like primitive
types. As of yet, it has no functionality differences from
StorageTypeAssignable.
"""
pass
ty_int = StorageTypeSimple('int')
ty_double = StorageTypeSimple('double')
class StorageTypeDoubleComplex(StorageTypeSimple):
r"""
This is specific to the complex double type. It behaves exactly
like a StorageTypeSimple in C, but needs a little help to do
conversions in Cython.
This uses functions defined in CDFInterpreter, and is for use in
that context.
"""
def assign_c_from_py(self, c, py):
"""
sage: from sage_setup.autogen.interpreters import ty_double_complex
sage: ty_double_complex.assign_c_from_py('z_c', 'z_py')
u'z_c = CDE_to_dz(z_py)'
"""
return je("{{ c }} = CDE_to_dz({{ py }})", c=c, py=py)
ty_double_complex = StorageTypeDoubleComplex('double_complex')
class StorageTypePython(StorageTypeAssignable):
r"""
StorageTypePython is a subtype of StorageTypeAssignable that deals
with Python objects.
Just allocating an array full of PyObject* leads to problems,
because the Python garbage collector must be able to get to every
Python object, and it wouldn't know how to get to these arrays.
So we allocate the array as a Python list, but then we immediately
pull the ob_item out of it and deal only with that from then on.
We often leave these lists with NULL entries. This is safe for
the garbage collector and the deallocator, which is all we care
about; but it would be unsafe to provide Python-level access to
these lists.
There is one special thing about StorageTypePython: memory that is
used by the interpreter as scratch space (for example, the stack)
must be cleared after each call (so we don't hold on to
potentially-large objects and waste memory). Since we have to do
this anyway, the interpreter gains a tiny bit of speed by assuming
that the scratch space is cleared on entry; for example, when
pushing a value onto the stack, it doesn't bother to XDECREF the
previous value because it's always NULL.
"""
def __init__(self):
r"""
Initializes the properties described in the documentation
for StorageTypeAssignable.__init__. The type is always
'PyObject*'.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.class_member_declarations
''
sage: ty_python.class_member_initializations
''
sage: ty_python.local_declarations
''
sage: ty_python.type
'PyObject*'
"""
StorageTypeAssignable.__init__(self, 'PyObject*')
def python_refcounted(self):
r"""
Says whether this storage type is a Python type, so we need to
use INCREF/DECREF.
Returns True.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.python_refcounted()
True
"""
return True
def cython_decl_type(self):
r"""
Give the Cython type for a single value of this type (as a string).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.cython_decl_type()
'object'
"""
return 'object'
def declare_chunk_class_members(self, name):
r"""
Return a string giving the declarations of the class members
in a wrapper class for a memory chunk with this storage type
and the given name.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.declare_chunk_class_members('args')
u' cdef object _list_args\n cdef int _n_args\n cdef PyObject** _args\n'
"""
return je("""
cdef object _list_{{ name }}
cdef int _n_{{ name }}
cdef {{ myself.cython_array_type() }} _{{ name }}
""", myself=self, name=name)
def alloc_chunk_data(self, name, len):
r"""
Return a string allocating the memory for the class members for
a memory chunk with this storage type and the given name.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: print(ty_python.alloc_chunk_data('args', 'MY_LENGTH'))
self._n_args = MY_LENGTH
self._list_args = PyList_New(self._n_args)
self._args = (<PyListObject *>self._list_args).ob_item
<BLANKLINE>
"""
return je("""
self._n_{{ name }} = {{ len }}
self._list_{{ name }} = PyList_New(self._n_{{ name }})
self._{{ name }} = (<PyListObject *>self._list_{{ name }}).ob_item
""", myself=self, name=name, len=len)
def dealloc_chunk_data(self, name):
r"""
Return a string to be put in the __dealloc__ method of a
wrapper class using a memory chunk with this storage type, to
deallocate the corresponding class members.
Our array was allocated as a Python list; this means we actually
don't need to do anything to deallocate it.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.dealloc_chunk_data('args')
''
"""
return ''
def needs_cython_init_clear(self):
r"""
Says whether values/arrays of this type need to be initialized
before use and cleared before the underlying memory is freed.
Returns True.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.needs_cython_init_clear()
True
"""
return True
def assign_c_from_py(self, c, py):
r"""
Given a Cython variable/array reference/etc. of this storage type,
and a Python expression, generate code to assign to the Cython
variable from the Python expression.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.assign_c_from_py('foo[i]', 'bar[j]')
u'foo[i] = <PyObject *>bar[j]; Py_INCREF(foo[i])'
"""
return je("""{{ c }} = <PyObject *>{{ py }}; Py_INCREF({{ c }})""",
c=c, py=py)
def cython_init(self, loc):
r"""
Generates code to initialize a variable (or array reference)
holding a PyObject*. Sets it to NULL.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.cython_init('foo[i]')
u'foo[i] = NULL'
"""
return je("{{ loc }} = NULL", loc=loc)
def cython_clear(self, loc):
r"""
Generates code to clear a variable (or array reference) holding
a PyObject*.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_python.cython_clear('foo[i]')
u'Py_CLEAR(foo[i])'
"""
return je("Py_CLEAR({{ loc }})", loc=loc)
ty_python = StorageTypePython()
class StorageTypeAutoReference(StorageType):
r"""
StorageTypeAutoReference is a subtype of StorageType that deals with
types in the style of GMP/MPIR/MPFR/MPFI/FLINT, where copies are
not cheap, functions expect arguments to be passed by reference,
and the API takes advantage of the C quirk where arrays are
automatically converted to pointers to automatically pass
arguments by reference.
"""
def __init__(self, decl_ty, ref_ty):
r"""
Initializes the properties decl_type and ref_type (the C type
names used when declaring variables and function parameters,
respectively), as well as the properties described in
the documentation for StorageType.__init__.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.class_member_declarations
'cdef RealField_class domain\n'
sage: ty_mpfr.class_member_initializations
"self.domain = args['domain']\n"
sage: ty_mpfr.local_declarations
'cdef RealNumber rn\n'
sage: ty_mpfr.decl_type
'mpfr_t'
sage: ty_mpfr.ref_type
'mpfr_ptr'
"""
StorageType.__init__(self)
self.decl_type = decl_ty
self.ref_type = ref_ty
def c_decl_type(self):
r"""
Give the C type for a single value of this type (as a string).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.c_decl_type()
'mpfr_t'
"""
return self.decl_type
def c_local_type(self):
r"""
Give the C type used for a value of this type inside an
instruction. For assignable/cheap_copy types, this is the
same as c_decl_type; for auto-reference types, this is the
pointer type.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.c_local_type()
'mpfr_ptr'
"""
return self.ref_type
def c_reference_type(self):
r"""
Give the C type which should be used for passing a reference
to a single value in a call. This is used as the type for the
return value.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.c_reference_type()
'mpfr_t'
"""
return self.decl_type
def needs_cython_init_clear(self):
r"""
Says whether values/arrays of this type need to be initialized
before use and cleared before the underlying memory is freed.
All known examples of auto-reference types do need a special
initialization call, so this always returns True.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.needs_cython_init_clear()
True
"""
return True
class StorageTypeMPFR(StorageTypeAutoReference):
r"""
    StorageTypeMPFR is a subtype of StorageTypeAutoReference that deals
    with MPFR's mpfr_t type.
For any given program that we're interpreting, ty_mpfr can only
refer to a single precision. An interpreter that needs to use
two precisions of mpfr_t in the same program should instantiate two
separate instances of StorageTypeMPFR. (Interpreters that need
to handle arbitrarily many precisions in the same program are not
handled at all.)
"""
def __init__(self, id=''):
r"""
Initializes the id property, as well as the properties described
in the documentation for StorageTypeAutoReference.__init__.
The id property is used if you want to have an interpreter
that handles two instances of StorageTypeMPFR (that is,
handles mpfr_t variables at two different precisions
simultaneously). It's a string that's used to generate
variable names that don't conflict. (The id system has
never actually been used, so bugs probably remain.)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.class_member_declarations
'cdef RealField_class domain\n'
sage: ty_mpfr.class_member_initializations
"self.domain = args['domain']\n"
sage: ty_mpfr.local_declarations
'cdef RealNumber rn\n'
sage: ty_mpfr.decl_type
'mpfr_t'
sage: ty_mpfr.ref_type
'mpfr_ptr'
TESTS::
sage: ty_mpfr2 = StorageTypeMPFR(id='_the_second')
sage: ty_mpfr2.class_member_declarations
'cdef RealField_class domain_the_second\n'
sage: ty_mpfr2.class_member_initializations
"self.domain_the_second = args['domain_the_second']\n"
sage: ty_mpfr2.local_declarations
'cdef RealNumber rn_the_second\n'
"""
StorageTypeAutoReference.__init__(self, 'mpfr_t', 'mpfr_ptr')
self.id = id
self.class_member_declarations = "cdef RealField_class domain%s\n" % self.id
self.class_member_initializations = \
"self.domain%s = args['domain%s']\n" % (self.id, self.id)
self.local_declarations = "cdef RealNumber rn%s\n" % self.id
def cython_init(self, loc):
r"""
Generates code to initialize an mpfr_t reference (a variable, an
array reference, etc.)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.cython_init('foo[i]')
u'mpfr_init2(foo[i], self.domain.prec())'
"""
return je("mpfr_init2({{ loc }}, self.domain{{ myself.id }}.prec())",
myself=self, loc=loc)
def cython_clear(self, loc):
r"""
Generates code to clear an mpfr_t reference (a variable, an
array reference, etc.)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.cython_clear('foo[i]')
'mpfr_clear(foo[i])'
"""
return 'mpfr_clear(%s)' % loc
def assign_c_from_py(self, c, py):
r"""
Given a Cython variable/array reference/etc. of this storage type,
and a Python expression, generate code to assign to the Cython
variable from the Python expression.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: ty_mpfr.assign_c_from_py('foo[i]', 'bar[j]')
u'rn = self.domain(bar[j])\nmpfr_set(foo[i], rn.value, MPFR_RNDN)'
"""
return je("""
rn{{ myself.id }} = self.domain({{ py }})
mpfr_set({{ c }}, rn.value, MPFR_RNDN)""", myself=self, c=c, py=py)
ty_mpfr = StorageTypeMPFR()
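# Illustrative sketch (not part of the generator): how the storage types above
# are exercised -- each method emits a Cython/C source fragment as a string,
# which the interpreter generator splices into the generated wrapper class.
# The function name is hypothetical.
def _example_storage_type_fragments():
    # For an auto-reference type like mpfr_t, init/clear code is generated per
    # array slot, and Python values are converted through self.domain.
    init_fragment = ty_mpfr.cython_init('self._constants[i]')
    assign_fragment = ty_mpfr.assign_c_from_py('self._args[0]', 'the_arg')
    # For a simple type like double, assignment is a plain C assignment.
    simple_fragment = ty_double.assign_c_from_py('arg', 'the_arg')
    return init_fragment, assign_fragment, simple_fragment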
class MemoryChunk(object):
r"""
Memory chunks control allocation, deallocation, initialization,
etc. of the vectors and objects in the interpreter. Basically,
there is one memory chunk per argument to the C interpreter.
There are three "generic" varieties of memory chunk: "constants",
"arguments", and "scratch". These are named after their most
common use, but they could be used for other things in some
interpreters.
All three kinds of chunks are allocated in the wrapper class.
Constants are initialized when the wrapper is constructed;
arguments are initialized in the __call__ method, from the
caller's arguments. "scratch" chunks are not initialized at all;
they are used for scratch storage (often, but not necessarily, for
a stack) in the interpreter.
Interpreters which need memory chunks that don't fit into these
categories can create new subclasses of MemoryChunk.
"""
def __init__(self, name, storage_type):
r"""
Initialize an instance of MemoryChunk.
This sets the properties "name" (the name of this memory chunk;
used in generated variable names, etc.) and "storage_type",
which is a StorageType object.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: mc.name
'args'
sage: mc.storage_type is ty_mpfr
True
"""
self.name = name
self.storage_type = storage_type
def __repr__(self):
r"""
Give a string representation of this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: mc
{MC:args}
sage: mc.__repr__()
'{MC:args}'
"""
return '{MC:%s}' % self.name
def declare_class_members(self):
r"""
Return a string giving the declarations of the class members
in a wrapper class for this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: mc.declare_class_members()
u' cdef int _n_args\n cdef mpfr_t* _args\n'
"""
return self.storage_type.declare_chunk_class_members(self.name)
def init_class_members(self):
r"""
Return a string to be put in the __init__ method of a wrapper
class using this memory chunk, to initialize the corresponding
class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: print(mc.init_class_members())
count = args['args']
self._n_args = count
self._args = <mpfr_t*>sig_malloc(sizeof(mpfr_t) * count)
if self._args == NULL: raise MemoryError
for i in range(count):
mpfr_init2(self._args[i], self.domain.prec())
<BLANKLINE>
"""
return ""
def dealloc_class_members(self):
r"""
Return a string to be put in the __dealloc__ method of a wrapper
class using this memory chunk, to deallocate the corresponding
class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: print(mc.dealloc_class_members())
if self._args:
for i in range(self._n_args):
mpfr_clear(self._args[i])
sig_free(self._args)
<BLANKLINE>
"""
return ""
def declare_parameter(self):
r"""
Return the string to use to declare the interpreter parameter
corresponding to this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: mc.declare_parameter()
'mpfr_t* args'
"""
return '%s %s' % (self.storage_type.c_ptr_type(), self.name)
def declare_call_locals(self):
r"""
Return a string to put in the __call__ method of a wrapper
class using this memory chunk, to allocate local variables.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkRRRetval('retval', ty_mpfr)
sage: mc.declare_call_locals()
u' cdef RealNumber retval = (self.domain)()\n'
"""
return ""
def pass_argument(self):
r"""
Return the string to pass the argument corresponding to this
memory chunk to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkConstants('constants', ty_mpfr)
sage: mc.pass_argument()
'self._constants'
"""
raise NotImplementedError
def pass_call_c_argument(self):
r"""
Return the string to pass the argument corresponding to this
memory chunk to the interpreter, for use in the call_c method.
Almost always the same as pass_argument.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkConstants('constants', ty_mpfr)
sage: mc.pass_call_c_argument()
'self._constants'
"""
return self.pass_argument()
def needs_cleanup_on_error(self):
r"""
In an interpreter that can terminate prematurely (due to an
exception from calling Python code, or divide by zero, or
whatever) it will just return at the end of the current instruction,
skipping the rest of the program. Thus, it may still have
values pushed on the stack, etc.
This method returns True if this memory chunk is modified by the
interpreter and needs some sort of cleanup when an error happens.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkConstants('constants', ty_mpfr)
sage: mc.needs_cleanup_on_error()
False
"""
return False
def is_stack(self):
r"""
Says whether this memory chunk is a stack. This affects code
generation for instructions using this memory chunk.
It would be nicer to make this object-oriented somehow, so
that the code generator called MemoryChunk methods instead of
using::
if ch.is_stack():
... hardcoded stack code
else:
... hardcoded non-stack code
but that hasn't been done yet.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkScratch('scratch', ty_mpfr)
sage: mc.is_stack()
False
sage: mc = MemoryChunkScratch('stack', ty_mpfr, is_stack=True)
sage: mc.is_stack()
True
"""
return False
def is_python_refcounted_stack(self):
r"""
Says whether this memory chunk refers to a stack where the entries
need to be INCREF/DECREF'ed.
It would be nice to make this object-oriented, so that the
code generator called MemoryChunk methods to do the potential
INCREF/DECREF and didn't have to explicitly test
is_python_refcounted_stack.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkScratch('args', ty_python)
sage: mc.is_python_refcounted_stack()
False
sage: mc = MemoryChunkScratch('args', ty_python, is_stack=True)
sage: mc.is_python_refcounted_stack()
True
sage: mc = MemoryChunkScratch('args', ty_mpfr, is_stack=True)
sage: mc.is_python_refcounted_stack()
False
"""
return self.is_stack() and self.storage_type.python_refcounted()
class MemoryChunkLonglivedArray(MemoryChunk):
r"""
MemoryChunkLonglivedArray is a subtype of MemoryChunk that deals
with memory chunks that are 1) allocated as class members (rather
than being allocated in __call__) and 2) arrays.
"""
def init_class_members(self):
r"""
Return a string to be put in the __init__ method of a wrapper
class using this memory chunk, to initialize the corresponding
class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_double)
sage: print(mc.init_class_members())
count = args['args']
self._n_args = count
self._args = <double*>sig_malloc(sizeof(double) * count)
if self._args == NULL: raise MemoryError
<BLANKLINE>
"""
return je("""
count = args['{{ myself.name }}']
{% print(myself.storage_type.alloc_chunk_data(myself.name, 'count')) %}
""", myself=self)
def dealloc_class_members(self):
r"""
Return a string to be put in the __dealloc__ method of a wrapper
class using this memory chunk, to deallocate the corresponding
class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: print(mc.dealloc_class_members())
if self._args:
for i in range(self._n_args):
mpfr_clear(self._args[i])
sig_free(self._args)
<BLANKLINE>
"""
return self.storage_type.dealloc_chunk_data(self.name)
def pass_argument(self):
r"""
Return the string to pass the argument corresponding to this
memory chunk to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkConstants('constants', ty_mpfr)
sage: mc.pass_argument()
'self._constants'
"""
return 'self._%s' % self.name
class MemoryChunkConstants(MemoryChunkLonglivedArray):
r"""
MemoryChunkConstants is a subtype of MemoryChunkLonglivedArray.
MemoryChunkConstants chunks have their contents set in the
wrapper's __init__ method (and not changed afterward).
"""
def init_class_members(self):
r"""
Return a string to be put in the __init__ method of a wrapper
class using this memory chunk, to initialize the corresponding
class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkConstants('constants', ty_mpfr)
sage: print(mc.init_class_members())
val = args['constants']
self._n_constants = len(val)
self._constants = <mpfr_t*>sig_malloc(sizeof(mpfr_t) * len(val))
if self._constants == NULL: raise MemoryError
for i in range(len(val)):
mpfr_init2(self._constants[i], self.domain.prec())
for i in range(len(val)):
rn = self.domain(val[i])
mpfr_set(self._constants[i], rn.value, MPFR_RNDN)
<BLANKLINE>
"""
return je("""
val = args['{{ myself.name }}']
{% print(myself.storage_type.alloc_chunk_data(myself.name, 'len(val)')) %}
for i in range(len(val)):
{{ myself.storage_type.assign_c_from_py('self._%s[i]' % myself.name, 'val[i]') | i(12) }}
""", myself=self)
class MemoryChunkArguments(MemoryChunkLonglivedArray):
r"""
MemoryChunkArguments is a subtype of MemoryChunkLonglivedArray,
for dealing with arguments to the wrapper's ``__call__`` method.
Currently the ``__call__`` method is declared to take a varargs
`*args` argument tuple. We assume that the MemoryChunk named `args`
will deal with that tuple.
"""
def setup_args(self):
r"""
Handle the arguments of __call__ -- copy them into a pre-allocated
array, ready to pass to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: print(mc.setup_args())
cdef mpfr_t* c_args = self._args
cdef int i
for i from 0 <= i < len(args):
rn = self.domain(args[i])
mpfr_set(self._args[i], rn.value, MPFR_RNDN)
<BLANKLINE>
"""
return je("""
cdef {{ myself.storage_type.c_ptr_type() }} c_args = self._args
cdef int i
for i from 0 <= i < len(args):
{{ myself.storage_type.assign_c_from_py('self._args[i]', 'args[i]') | i(4) }}
""", myself=self)
def pass_argument(self):
r"""
Return the string to pass the argument corresponding to this
memory chunk to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkArguments('args', ty_mpfr)
sage: mc.pass_argument()
'c_args'
"""
return 'c_args'
class MemoryChunkScratch(MemoryChunkLonglivedArray):
r"""
MemoryChunkScratch is a subtype of MemoryChunkLonglivedArray
for dealing with memory chunks that are allocated in the wrapper,
but only used in the interpreter -- stacks, scratch registers, etc.
(Currently these are only used as stacks.)
"""
def __init__(self, name, storage_type, is_stack=False):
r"""
Initialize an instance of MemoryChunkScratch.
Initializes the _is_stack property, as well as
the properties described in the documentation for
MemoryChunk.__init__.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkScratch('stack', ty_double, is_stack=True)
sage: mc.name
'stack'
sage: mc.storage_type is ty_double
True
sage: mc._is_stack
True
"""
MemoryChunkLonglivedArray.__init__(self, name, storage_type)
self._is_stack = is_stack
def is_stack(self):
r"""
Says whether this memory chunk is a stack. This affects code
generation for instructions using this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkScratch('stack', ty_mpfr, is_stack=True)
sage: mc.is_stack()
True
"""
return self._is_stack
def needs_cleanup_on_error(self):
r"""
In an interpreter that can terminate prematurely (due to an
exception from calling Python code, or divide by zero, or
whatever) it will just return at the end of the current instruction,
skipping the rest of the program. Thus, it may still have
values pushed on the stack, etc.
This method returns True if this memory chunk is modified by the
interpreter and needs some sort of cleanup when an error happens.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkScratch('registers', ty_python)
sage: mc.needs_cleanup_on_error()
True
"""
return self.storage_type.python_refcounted()
def handle_cleanup(self):
r"""
Handle the cleanup if the interpreter exits with an error.
For scratch/stack chunks that hold Python-refcounted values,
we assume that they are filled with NULL on every entry to the
interpreter. If the interpreter exited with an error, it may
have left values in the chunk, so we need to go through
the chunk and Py_CLEAR each entry.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkScratch('registers', ty_python)
sage: print(mc.handle_cleanup())
for i in range(self._n_registers):
Py_CLEAR(self._registers[i])
<BLANKLINE>
"""
# XXX This is a lot slower than it needs to be, because
# we don't have a "cdef int i" in scope here.
return je("""
for i in range(self._n_{{ myself.name }}):
Py_CLEAR(self._{{ myself.name }}[i])
""", myself=self)
class MemoryChunkRRRetval(MemoryChunk):
r"""
A special-purpose memory chunk, for dealing with the return value
of the RR-based interpreter.
"""
def declare_class_members(self):
r"""
Return a string giving the declarations of the class members
in a wrapper class for this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkRRRetval('retval', ty_mpfr)
sage: mc.declare_class_members()
''
"""
return ""
def declare_call_locals(self):
r"""
Return a string to put in the __call__ method of a wrapper
class using this memory chunk, to allocate local variables.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkRRRetval('retval', ty_mpfr)
sage: mc.declare_call_locals()
u' cdef RealNumber retval = (self.domain)()\n'
"""
return je("""
cdef RealNumber {{ myself.name }} = (self.domain)()
""", myself=self)
def declare_parameter(self):
r"""
Return the string to use to declare the interpreter parameter
corresponding to this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkRRRetval('retval', ty_mpfr)
sage: mc.declare_parameter()
'mpfr_t retval'
"""
return '%s %s' % (self.storage_type.c_reference_type(), self.name)
def pass_argument(self):
r"""
Return the string to pass the argument corresponding to this
memory chunk to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkRRRetval('retval', ty_mpfr)
sage: mc.pass_argument()
u'retval.value'
"""
return je("""{{ myself.name }}.value""", myself=self)
def pass_call_c_argument(self):
r"""
Return the string to pass the argument corresponding to this
memory chunk to the interpreter, for use in the call_c method.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkRRRetval('retval', ty_mpfr)
sage: mc.pass_call_c_argument()
'result'
"""
return "result"
class MemoryChunkPythonArguments(MemoryChunk):
r"""
A special-purpose memory chunk, for the generic Python-object based
interpreter. Rather than copy the arguments into an array allocated
in the wrapper, we use the PyTupleObject internals and pass the array
that's inside the argument tuple.
"""
def declare_class_members(self):
r"""
Return a string giving the declarations of the class members
in a wrapper class for this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPythonArguments('args', ty_python)
"""
return " cdef int _n_%s\n" % self.name
def init_class_members(self):
r"""
Return a string to be put in the __init__ method of a wrapper
class using this memory chunk, to initialize the corresponding
class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPythonArguments('args', ty_python)
sage: mc.init_class_members()
u" count = args['args']\n self._n_args = count\n"
"""
return je("""
count = args['{{ myself.name }}']
self._n_args = count
""", myself=self)
def setup_args(self):
r"""
Handle the arguments of __call__. Nothing to do.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPythonArguments('args', ty_python)
sage: mc.setup_args()
''
"""
return ''
def pass_argument(self):
r"""
Pass the innards of the argument tuple to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPythonArguments('args', ty_python)
sage: mc.pass_argument()
'(<PyTupleObject*>args).ob_item'
"""
return "(<PyTupleObject*>args).ob_item"
class MemoryChunkElementArguments(MemoryChunkPythonArguments):
r"""
A special-purpose memory chunk, for the Python-object based
interpreters that want to process (and perhaps modify) the data.
We allocate a new list (via the map function) on every call to
hold the modified arguments. That's not strictly necessary --
we could pre-allocate a list and map into it -- but this lets us
use simpler code for a very-likely-negligible efficiency cost.
(The Element interpreter is going to allocate lots of objects
as it runs, anyway.)
"""
def setup_args(self):
r"""
Handle the arguments of __call__. Note: This hardcodes
"self._domain".
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkElementArguments('args', ty_python)
sage: mc.setup_args()
'mapped_args = map(self._domain, args)\n'
"""
return "mapped_args = map(self._domain, args)\n"
def pass_argument(self):
r"""
Pass the innards of the argument tuple to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkElementArguments('args', ty_python)
sage: mc.pass_argument()
'(<PyListObject*>mapped_args).ob_item'
"""
return "(<PyListObject*>mapped_args).ob_item"
class MemoryChunkPyConstant(MemoryChunk):
r"""
A special-purpose memory chunk, for holding a single Python constant
and passing it to the interpreter as a PyObject*.
"""
def __init__(self, name):
r"""
Initialize an instance of MemoryChunkPyConstant.
Always uses the type ty_python.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPyConstant('domain')
sage: mc.name
'domain'
sage: mc.storage_type is ty_python
True
"""
MemoryChunk.__init__(self, name, ty_python)
def declare_class_members(self):
r"""
Return a string giving the declarations of the class members
in a wrapper class for this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPyConstant('domain')
sage: mc.declare_class_members()
u' cdef object _domain\n'
"""
return je("""
cdef object _{{ myself.name }}
""", myself=self)
def init_class_members(self):
r"""
Return a string to be put in the __init__ method of a wrapper
class using this memory chunk, to initialize the corresponding
class members.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPyConstant('domain')
sage: mc.init_class_members()
u" self._domain = args['domain']\n"
"""
return je("""
self._{{ myself.name }} = args['{{ myself.name }}']
""", myself=self)
def declare_parameter(self):
r"""
Return the string to use to declare the interpreter parameter
corresponding to this memory chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPyConstant('domain')
sage: mc.declare_parameter()
'PyObject* domain'
"""
return 'PyObject* %s' % self.name
def pass_argument(self):
r"""
Return the string to pass the argument corresponding to this
memory chunk to the interpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc = MemoryChunkPyConstant('domain')
sage: mc.pass_argument()
'<PyObject*>self._domain'
"""
return '<PyObject*>self._%s' % self.name
def params_gen(**chunks):
r"""
Instructions have a parameter specification that says where they get
their inputs and where their outputs go. Each parameter has
the same form: it is a triple (chunk, addr, len). The chunk says
where the parameter is read from/written to. The addr says which
value in the chunk is used. If the chunk is a stack chunk, then
addr must be null; the value will be read from/written to the top
of the stack. Otherwise, addr must be an integer, or another chunk;
if addr is another chunk, then the next value is read from that chunk
to be the address.
The len says how many values to read/write. It can be either None
(meaning to read/write only a single value), an integer, or
another chunk; if it is a chunk, then the next value is read from that
chunk to be the len. Note that specifying len changes the types
given to the instruction, so len==None is different than len==1 even
though both mean to use a single value.
These parameter specifications are cumbersome to write by hand, so
there's also a simple string format for them. This (curried)
function parses the simple string format and produces parameter
specifications. The params_gen function takes keyword arguments
mapping single-character names to memory chunks. The string format
uses these names. The params_gen function returns another function,
that takes two strings and returns a pair of lists of parameter
specifications.
Each string is the concatenation of arbitrarily many specifications.
Each specification consists of an address and a length. The
address is either a single character naming a stack chunk,
or a string of the form 'A[B]' where A names a non-stack chunk
and B names the code chunk. The length is either empty, or '@n'
for a number n (meaning to use that many arguments), or '@C', where
C is the code chunk.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc_stack = MemoryChunkScratch('stack', ty_double, is_stack=True)
sage: mc_args = MemoryChunkArguments('args', ty_double)
sage: mc_code = MemoryChunkConstants('code', ty_int)
sage: pg = params_gen(D=mc_code, A=mc_args, S=mc_stack)
sage: pg('S', '')
([({MC:stack}, None, None)], [])
sage: pg('A[D]', '')
([({MC:args}, {MC:code}, None)], [])
sage: pg('S@5', '')
([({MC:stack}, None, 5)], [])
sage: pg('S@D', '')
([({MC:stack}, None, {MC:code})], [])
sage: pg('A[D]@D', '')
([({MC:args}, {MC:code}, {MC:code})], [])
sage: pg('SSS@D', 'A[D]S@D')
([({MC:stack}, None, None), ({MC:stack}, None, None), ({MC:stack}, None, {MC:code})], [({MC:args}, {MC:code}, None), ({MC:stack}, None, {MC:code})])
"""
def make_params(s):
p = []
s = s.strip()
while s:
chunk_code = s[0]
s = s[1:]
chunk = chunks[chunk_code]
addr = None
ch_len = None
# shouldn't hardcode 'code' here
if chunk.is_stack() or chunk.name == 'code':
pass
else:
m = re.match(r'\[(?:([0-9]+)|([a-zA-Z]))\]', s)
if m.group(1):
addr = int(m.group(1))
else:
ch = chunks[m.group(2)]
assert ch.storage_type is ty_int
addr = ch
s = s[m.end():].strip()
if len(s) and s[0] == '@':
m = re.match(r'@(?:([0-9]+)|([a-zA-Z]))', s)
if m.group(1):
ch_len = int(m.group(1))
else:
ch = chunks[m.group(2)]
assert ch.storage_type is ty_int
ch_len = ch
s = s[m.end():].strip()
p.append((chunk, addr, ch_len))
return p
def params(s_ins, s_outs):
ins = make_params(s_ins)
outs = make_params(s_outs)
return (ins, outs)
return params
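# Illustration only (hypothetical helper, not part of this module's API):
# exercising the compact specification strings described above, using the
# same chunks as the params_gen doctest; ty_double and ty_int are the
# storage-type singletons defined earlier in this module.
def _sketch_params_gen_usage():
    mc_stack = MemoryChunkScratch('stack', ty_double, is_stack=True)
    mc_args = MemoryChunkArguments('args', ty_double)
    mc_code = MemoryChunkConstants('code', ty_int)
    pg = params_gen(D=mc_code, A=mc_args, S=mc_stack)
    # 'A[D]' reads the argument whose index comes from the code stream;
    # 'S@D' pops a code-determined number of values from the stack.
    return pg('A[D]S@D', 'S')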
def string_of_addr(a):
r"""
An address or a length from a parameter specification may be
either None, an integer, or a MemoryChunk. If the address or
length is an integer or a MemoryChunk, this function will convert
it to a string giving an expression that will evaluate to the correct
address or length. (See the docstring for params_gen for more
information on parameter specifications.)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: mc_code = MemoryChunkConstants('code', ty_int)
sage: string_of_addr(mc_code)
'*code++'
sage: string_of_addr(42r)
'42'
"""
if isinstance(a, six.integer_types):
return str(a)
assert(isinstance(a, MemoryChunk))
return '*%s++' % a.name
class InstrSpec(object):
r"""
Each instruction in an interpreter is represented as an InstrSpec.
This contains all the information that we need to generate code
to interpret the instruction; it also is used to build the tables
that fast_callable uses, so this is the nexus point between
users of the interpreter (possibly pure Python) and the
generated C interpreter.
The underlying instructions are matched to the caller by name.
For instance, fast_callable assumes that if the interpreter has an
instruction named 'cos', then it will take a single argument,
return a single result, and implement the cos() function.
The print representation of an instruction (which will probably
only be used when doctesting this file) consists of the name,
a simplified stack effect, and the code (truncated if it's long).
The stack effect has two parts, the input and the output, separated
by '->'; the input shows what will be popped from the stack,
the output what will be placed on the stack. Each consists of
a sequence of 'S' and '*' characters, where 'S' refers to a single
argument and '*' refers to a variable number of arguments.
The code for an instruction is a small snippet of C code. It has
available variables 'i0', 'i1', ..., 'o0', 'o1', ...; one variable
for each input and output; its job is to assign values to the output
variables, based on the values of the input variables.
Normally, in an interpreter that uses doubles, each of the input
and output variables will be a double. If i0 actually represents
a variable number of arguments, then it will be a pointer to
double instead, and there will be another variable n_i0 giving
the actual number of arguments.
When instructions refer to auto-reference types, they actually
get a pointer to the data in its original location; it is
not copied into a local variable. Mostly, this makes no difference,
but there is one potential problem to be aware of. It is possible
for an output variable to point to the same object as an input
variable; in fact, this usually will happen when you're working
with the stack. If the instruction maps to a single function call,
then this is fine; the standard auto-reference implementations
(GMP, MPFR, etc.) are careful to allow having the input and output
be the same. But if the instruction maps to multiple function
calls, you may need to use a temporary variable.
Here's an example of this issue. Suppose you want to make an
instruction that does ``out = a+b*c``. You write code like this::
out = b*c
out = a+out
But out will actually share the same storage as a; so the first line
modifies a, and you actually end up computing 2*(b*c). The fix
is to only write to the output once, at the very end of your
instruction.
Instructions are also allowed to access memory chunks (other than
the stack and code) directly. They are available as C variables
with the same name as the chunk. This is useful if some type of
memory chunk doesn't fit well with the params_gen interface.
There are additional reference-counting rules that must be
followed if your interpreter operates on Python objects; these
rules are described in the docstring of the PythonInterpreter
class.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RDFInterpreter().pg
sage: InstrSpec('add', pg('SS','S'), code='o0 = i0+i1;')
add: SS->S = 'o0 = i0+i1;'
"""
def __init__(self, name, io, code=None, uses_error_handler=False, handles_own_decref=False):
r"""
Initialize an InstrSpec.
INPUTS:
name -- the name of the instruction
io -- a pair of lists of parameter specifications for I/O of the
instruction
code -- a string containing a snippet of C code to read
from the input variables and write to the output variables
uses_error_handler -- True if the instruction calls Python
and jumps to error: on a Python error
handles_own_decref -- True if the instruction handles Python
objects and includes its own
reference-counting
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RDFInterpreter().pg
sage: InstrSpec('add', pg('SS','S'), code='o0 = i0+i1;')
add: SS->S = 'o0 = i0+i1;'
sage: instr = InstrSpec('py_call', pg('P[D]S@D', 'S'), code=('This is very complicated. ' + 'blah ' * 30)); instr
py_call: *->S = 'This is very compli... blah blah blah '
sage: instr.name
'py_call'
sage: instr.inputs
[({MC:py_constants}, {MC:code}, None), ({MC:stack}, None, {MC:code})]
sage: instr.outputs
[({MC:stack}, None, None)]
sage: instr.code
'This is very complicated. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah '
sage: instr.parameters
['py_constants', 'n_inputs']
sage: instr.n_inputs
0
sage: instr.n_outputs
1
"""
self.name = name
self.inputs = io[0]
self.outputs = io[1]
self.uses_error_handler = uses_error_handler
self.handles_own_decref = handles_own_decref
if code is not None:
self.code = code
# XXX We assume that there is only one stack
n_inputs = 0
n_outputs = 0
in_effect = ''
out_effect = ''
p = []
for (ch, addr, len) in self.inputs:
if ch.is_stack():
if len is None:
n_inputs += 1
in_effect += 'S'
elif isinstance(len, six.integer_types):
n_inputs += len
in_effect += 'S%d' % len
else:
p.append('n_inputs')
in_effect += '*'
else:
p.append(ch.name)
for (ch, addr, len) in self.outputs:
if ch.is_stack():
if len is None:
n_outputs += 1
out_effect += 'S'
elif isinstance(len, six.integer_types):
n_outputs += len
out_effect += 'S%d' % len
else:
p.append('n_outputs')
out_effect += '*'
else:
p.append(ch.name)
self.parameters = p
self.n_inputs = n_inputs
self.n_outputs = n_outputs
self.in_effect = in_effect
self.out_effect = out_effect
def __repr__(self):
r"""
Produce a string representing a given instruction, consisting
of its name, a brief stack specification, and its code
(possibly abbreviated).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RDFInterpreter().pg
sage: InstrSpec('add', pg('SS','S'), code='o0 = i0+i1;')
add: SS->S = 'o0 = i0+i1;'
"""
rcode = repr(self.code)
if len(rcode) > 40:
rcode = rcode[:20] + '...' + rcode[-17:]
return '%s: %s->%s = %s' % \
(self.name, self.in_effect, self.out_effect, rcode)
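# Illustration only (hypothetical helper, not part of this module's API):
# building one instruction by hand and reading back the fields that
# InstrSpec.__init__ derives from the parameter specification.
def _sketch_instr_spec_fields():
    mc_stack = MemoryChunkScratch('stack', ty_double, is_stack=True)
    pg = params_gen(S=mc_stack)
    add = InstrSpec('add', pg('SS', 'S'), code='o0 = i0 + i1;')
    # Two single-value stack inputs and one stack output give 'SS->S'.
    return (add.in_effect, add.out_effect, add.n_inputs, add.n_outputs)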
# Now we have a series of helper functions that make it slightly easier
# to create instructions.
def instr_infix(name, io, op):
r"""
A helper function for creating instructions implemented by
a single infix binary operator.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RDFInterpreter().pg
sage: instr_infix('mul', pg('SS', 'S'), '*')
mul: SS->S = 'o0 = i0 * i1;'
"""
return InstrSpec(name, io, code='o0 = i0 %s i1;' % op)
def instr_funcall_2args(name, io, op):
r"""
A helper function for creating instructions implemented by
a two-argument function call.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RDFInterpreter().pg
sage: instr_funcall_2args('atan2', pg('SS', 'S'), 'atan2')
atan2: SS->S = 'o0 = atan2(i0, i1);'
"""
return InstrSpec(name, io, code='o0 = %s(i0, i1);' % op)
def instr_unary(name, io, op):
r"""
A helper function for creating instructions with one input
and one output.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RDFInterpreter().pg
sage: instr_unary('sin', pg('S','S'), 'sin(i0)')
sin: S->S = 'o0 = sin(i0);'
sage: instr_unary('neg', pg('S','S'), '-i0')
neg: S->S = 'o0 = -i0;'
"""
return InstrSpec(name, io, code='o0 = ' + op + ';')
def instr_funcall_2args_mpfr(name, io, op):
r"""
A helper function for creating MPFR instructions with two inputs
and one output.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RRInterpreter().pg
sage: instr_funcall_2args_mpfr('add', pg('SS','S'), 'mpfr_add')
add: SS->S = 'mpfr_add(o0, i0, i1, MPFR_RNDN);'
"""
return InstrSpec(name, io, code='%s(o0, i0, i1, MPFR_RNDN);' % op)
def instr_funcall_1arg_mpfr(name, io, op):
r"""
A helper function for creating MPFR instructions with one input
and one output.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: pg = RRInterpreter().pg
sage: instr_funcall_1arg_mpfr('exp', pg('S','S'), 'mpfr_exp')
exp: S->S = 'mpfr_exp(o0, i0, MPFR_RNDN);'
"""
return InstrSpec(name, io, code='%s(o0, i0, MPFR_RNDN);' % op)
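# Illustration only (hypothetical helper, not part of this module's API):
# the helper constructors above differ only in the C expression template
# wrapped around the instruction's code snippet.
def _sketch_instruction_helpers():
    mc_stack = MemoryChunkScratch('stack', ty_double, is_stack=True)
    pg = params_gen(S=mc_stack)
    return [instr_infix('mul', pg('SS', 'S'), '*'),
            instr_funcall_2args('atan2', pg('SS', 'S'), 'atan2'),
            instr_unary('neg', pg('S', 'S'), '-i0')]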
class InterpreterSpec(object):
r"""
Each interpreter to be generated by this module is represented
by an InterpreterSpec.
"""
def __init__(self):
r"""
Initialize an InterpreterSpec.
Initializes the following fields:
- ``c_header`` -- a code snippet to go at the top of the C
interpreter source file
- ``pxd_header`` -- a code snippet to go at the top of the
wrapper class .pxd file
- ``pyx_header`` -- a code snippet to go at the top of the
wrapper class source file
- ``err_return`` -- a string indicating the value to be
returned in case of a Python exception
- ``mc_code`` -- a memory chunk to use for the interpreted code
- ``extra_class_members`` -- Class members for the wrapper that
don't correspond to memory chunks
- ``extra_members_initialize`` -- Code to initialize
extra_class_members
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: interp.c_header
'#include <gsl/gsl_math.h>'
sage: interp.pxd_header
''
sage: interp.pyx_header
'cimport sage.libs.gsl.math # Add dependency on GSL'
sage: interp.err_return
'-1094648009105371'
sage: interp.mc_code
{MC:code}
sage: interp = RRInterpreter()
sage: interp.extra_class_members
''
sage: interp.extra_members_initialize
''
"""
self.c_header = ''
self.pxd_header = ''
self.pyx_header = ''
self.err_return = 'NULL'
self.mc_code = MemoryChunkConstants('code', ty_int)
self.extra_class_members = ''
self.extra_members_initialize = ''
def _set_opcodes(self):
r"""
Assign opcodes to the instructions in this interpreter.
Must be called at the end of __init__ by any subclass of
InterpreterSpec.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: interp.instr_descs[5].opcode
5
"""
for i in range(len(self.instr_descs)):
self.instr_descs[i].opcode = i
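# Illustration only (hypothetical helper, not part of this module's API):
# opcodes are nothing more than positions in instr_descs, assigned by
# _set_opcodes() at the end of each interpreter's __init__.
def _sketch_opcode_assignment():
    mc_stack = MemoryChunkScratch('stack', ty_double, is_stack=True)
    pg = params_gen(S=mc_stack)
    spec = InterpreterSpec()
    spec.instr_descs = [instr_unary('neg', pg('S', 'S'), '-i0'),
                        instr_infix('add', pg('SS', 'S'), '+')]
    spec._set_opcodes()
    return [(d.name, d.opcode) for d in spec.instr_descs]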
class StackInterpreter(InterpreterSpec):
r"""
A subclass of InterpreterSpec, specialized for stack-based
interpreters. (Currently all interpreters are stack-based.)
"""
def __init__(self, type, mc_retval=None):
r"""
Initialize a StackInterpreter.
INPUTS:
type -- A StorageType; the basic type that this interpreter
operates on
mc_retval -- default None; if not None, a special-purpose
MemoryChunk to use as a return value
Initializes the fields described in the documentation for
InterpreterSpec.__init__, as well as the following:
mc_args, mc_constants, mc_stack -- MemoryChunk values
return_type -- the type returned by the C interpreter (None for int,
where 1 means success and 0 means error)
mc_retval -- None, or the MemoryChunk to use as a return value
ipow_range -- the range of exponents supported by the ipow
instruction (default is False, meaning never use ipow)
adjust_retval -- None, or a string naming a function to call
in the wrapper's __call__ to modify the return
value of the interpreter
implement_call_c -- True if the wrapper should have a fast cdef call_c
method (that bypasses the Python call overhead)
(default True)
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: rdf = RDFInterpreter()
sage: rr = RRInterpreter()
sage: el = ElementInterpreter()
sage: rdf.mc_args
{MC:args}
sage: rdf.mc_constants
{MC:constants}
sage: rdf.mc_stack
{MC:stack}
sage: rr.mc_retval
{MC:retval}
sage: rr.return_type is None
True
sage: rdf.return_type.type
'double'
sage: rdf.implement_call_c
True
sage: el.implement_call_c
False
"""
InterpreterSpec.__init__(self)
self.mc_args = MemoryChunkArguments('args', type)
self.mc_constants = MemoryChunkConstants('constants', type)
self.mc_stack = MemoryChunkScratch('stack', type, is_stack=True)
if isinstance(type, StorageTypeAssignable):
self.return_type = type
else:
self.return_type = None
self.mc_retval = mc_retval
self.ipow_range = False
self.adjust_retval = None
self.implement_call_c = True
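# Illustration only (hypothetical helper, not part of this module's API):
# the C return type is the storage type itself only when values can be
# copied by assignment; auto-reference types such as ty_mpfr make the
# interpreter return an int success flag and use an mc_retval chunk.
def _sketch_return_type_choice():
    return (StackInterpreter(ty_double).return_type is ty_double,
            StackInterpreter(ty_mpfr).return_type is None)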
class RDFInterpreter(StackInterpreter):
r"""
A subclass of StackInterpreter, specifying an interpreter over
machine-floating-point values (C doubles). This is used for
both domain=RDF and domain=float; currently the only difference
between the two is the type of the value returned from the
wrapper (they use the same wrapper and interpreter).
"""
def __init__(self):
r"""
Initialize an RDFInterpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: interp.name
'rdf'
sage: interp.extra_class_members
'cdef object _domain\n'
sage: interp.extra_members_initialize
"self._domain = args['domain']\n"
sage: interp.adjust_retval
'self._domain'
sage: interp.mc_py_constants
{MC:py_constants}
sage: interp.chunks
[{MC:args}, {MC:constants}, {MC:py_constants}, {MC:stack}, {MC:code}]
sage: interp.pg('A[D]', 'S')
([({MC:args}, {MC:code}, None)], [({MC:stack}, None, None)])
sage: instrs = dict([(ins.name, ins) for ins in interp.instr_descs])
sage: instrs['add']
add: SS->S = 'o0 = i0 + i1;'
sage: instrs['py_call']
py_call: *->S = '\nPyObject *py_arg...goto error;\n}\n'
Make sure that pow behaves reasonably::
sage: var('x,y')
(x, y)
sage: ff = fast_callable(x^y, vars=[x,y], domain=RDF)
sage: ff(1.5, 3)
3.375
sage: ff(-2, 3)
-8.0
sage: ff(-2, 1/3)
Traceback (most recent call last):
...
ValueError: negative number to a fractional power not real
"""
StackInterpreter.__init__(self, ty_double)
self.name = 'rdf'
self.mc_py_constants = MemoryChunkConstants('py_constants', ty_python)
# This is a randomly chosen number. Whenever this number is
# returned, the wrapper has to check whether an exception actually
# happened, so if an expression evaluates to this number execution
# is slightly slower. Hopefully that won't happen too often :)
self.err_return = '-1094648009105371'
self.chunks = [self.mc_args, self.mc_constants, self.mc_py_constants,
self.mc_stack,
self.mc_code]
pg = params_gen(A=self.mc_args, C=self.mc_constants, D=self.mc_code,
S=self.mc_stack, P=self.mc_py_constants)
self.pg = pg
self.c_header = '#include <gsl/gsl_math.h>'
self.pyx_header = 'cimport sage.libs.gsl.math # Add dependency on GSL'
instrs = [
InstrSpec('load_arg', pg('A[D]', 'S'),
code='o0 = i0;'),
InstrSpec('load_const', pg('C[D]', 'S'),
code='o0 = i0;'),
InstrSpec('return', pg('S', ''),
code='return i0;'),
InstrSpec('py_call', pg('P[D]S@D', 'S'),
uses_error_handler=True,
code="""
PyObject *py_args = PyTuple_New(n_i1);
if (py_args == NULL) goto error;
int i;
for (i = 0; i < n_i1; i++) {
PyObject *arg = PyFloat_FromDouble(i1[i]);
if (arg == NULL) {
Py_DECREF(py_args);
goto error;
}
PyTuple_SET_ITEM(py_args, i, arg);
}
PyObject *result = PyObject_CallObject(i0, py_args);
Py_DECREF(py_args);
if (result == NULL) goto error;
/* If result is not a float, then this will turn it into a float first. */
o0 = PyFloat_AsDouble(result);
Py_DECREF(result);
if (o0 == -1 && PyErr_Occurred()) {
goto error;
}
"""),
InstrSpec('pow', pg('SS', 'S'),
uses_error_handler=True,
code="""
/* See python's pow in floatobject.c */
if (i0 == 0) o0 = 1.0;
else {
if (i0 < 0 && i1 != floor(i1)) {
PyErr_SetString(PyExc_ValueError, "negative number to a fractional power not real");
goto error;
}
o0 = pow(i0, i1);
}
""")
]
for (name, op) in [('add', '+'), ('sub', '-'),
('mul', '*'), ('div', '/')]:
instrs.append(instr_infix(name, pg('SS', 'S'), op))
instrs.append(instr_funcall_2args('ipow', pg('SD', 'S'), 'gsl_pow_int'))
for (name, op) in [('neg', '-i0'), ('invert', '1/i0'),
('abs', 'fabs(i0)')]:
instrs.append(instr_unary(name, pg('S', 'S'), op))
for name in ['sqrt', 'ceil', 'floor', 'sin', 'cos', 'tan',
'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh', 'exp', 'log']:
instrs.append(instr_unary(name, pg('S', 'S'), "%s(i0)" % name))
self.instr_descs = instrs
self._set_opcodes()
# supported for exponents that fit in an int
self.ipow_range = (int(-2**31), int(2**31-1))
self.extra_class_members = "cdef object _domain\n"
self.extra_members_initialize = "self._domain = args['domain']\n"
self.adjust_retval = 'self._domain'
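# Illustration only (hypothetical helper, not part of this module's API):
# a quick way to dump the finished RDF instruction table, pairing each
# opcode with its name and simplified stack effect.
def _sketch_rdf_instruction_table():
    return [(d.opcode, d.name, '%s->%s' % (d.in_effect, d.out_effect))
            for d in RDFInterpreter().instr_descs]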
class CDFInterpreter(StackInterpreter):
r"""
A subclass of StackInterpreter, specifying an interpreter over
complex machine-floating-point values (C doubles).
"""
def __init__(self):
r"""
Initialize a CDFInterpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = CDFInterpreter()
sage: interp.name
'cdf'
sage: interp.mc_py_constants
{MC:py_constants}
sage: interp.chunks
[{MC:args}, {MC:constants}, {MC:py_constants}, {MC:stack}, {MC:code}]
sage: interp.pg('A[D]', 'S')
([({MC:args}, {MC:code}, None)], [({MC:stack}, None, None)])
sage: instrs = dict([(ins.name, ins) for ins in interp.instr_descs])
sage: instrs['add']
add: SS->S = 'o0 = i0 + i1;'
sage: instrs['sin']
sin: S->S = 'o0 = csin(i0);'
sage: instrs['py_call']
py_call: *->S = '\nif (!cdf_py_call_...goto error;\n}\n'
A test of integer powers::
sage: f(x) = sum(x^k for k in [-20..20])
sage: f(CDF(1+2j)) # rel tol 4e-16
-10391778.999999996 + 3349659.499999962*I
sage: ff = fast_callable(f, CDF)
sage: ff(1 + 2j) # rel tol 1e-14
-10391779.000000004 + 3349659.49999997*I
sage: ff.python_calls()
[]
sage: f(x) = sum(x^k for k in [0..5])
sage: ff = fast_callable(f, CDF)
sage: ff(2)
63.0
sage: ff(2j)
13.0 + 26.0*I
"""
StackInterpreter.__init__(self, ty_double_complex)
self.name = 'cdf'
self.mc_py_constants = MemoryChunkConstants('py_constants', ty_python)
# See comment for RDFInterpreter
self.err_return = '-1094648119105371'
self.adjust_retval = "dz_to_CDE"
self.chunks = [self.mc_args, self.mc_constants, self.mc_py_constants,
self.mc_stack,
self.mc_code]
pg = params_gen(A=self.mc_args, C=self.mc_constants, D=self.mc_code,
S=self.mc_stack, P=self.mc_py_constants)
self.pg = pg
self.c_header = """
#include <stdlib.h>
#include <complex.h>
#include "interpreters/wrapper_cdf.h"
/* On Solaris, we need to define _Imaginary_I when compiling with GCC,
* otherwise the constant I doesn't work. The definition below is based
* on glibc. */
#ifdef __GNUC__
#undef _Imaginary_I
#define _Imaginary_I (__extension__ 1.0iF)
#endif
typedef double complex double_complex;
static inline double complex csquareX(double complex z) {
double complex res;
__real__(res) = __real__(z) * __real__(z) - __imag__(z) * __imag__(z);
__imag__(res) = 2 * __real__(z) * __imag__(z);
return res;
}
static inline double complex cpow_int(double complex z, int exp) {
if (exp < 0) return 1/cpow_int(z, -exp);
switch (exp) {
case 0: return 1;
case 1: return z;
case 2: return csquareX(z);
case 3: return csquareX(z) * z;
case 4:
case 5:
case 6:
case 7:
case 8:
{
double complex z2 = csquareX(z);
double complex z4 = csquareX(z2);
if (exp == 4) return z4;
if (exp == 5) return z4 * z;
if (exp == 6) return z4 * z2;
if (exp == 7) return z4 * z2 * z;
if (exp == 8) return z4 * z4;
}
}
if (cimag(z) == 0) return pow(creal(z), exp);
if (creal(z) == 0) {
double r = pow(cimag(z), exp);
switch (exp % 4) {
case 0:
return r;
case 1:
return r * I;
case 2:
return -r;
default /* case 3 */:
return -r * I;
}
}
return cpow(z, exp);
}
"""
self.pxd_header = """
# This is to work around a header incompatibility with PARI using
# "I" as variable conflicting with the complex "I".
# If we cimport pari earlier, we avoid this problem.
cimport sage.libs.pari.types
# We need the type double_complex to work around
# http://trac.cython.org/ticket/869
# so this is a bit hackish.
cdef extern from "complex.h":
ctypedef double double_complex "double complex"
"""
self.pyx_header = """
from sage.rings.complex_double cimport ComplexDoubleElement
import sage.rings.complex_double
cdef object CDF = sage.rings.complex_double.CDF
cdef extern from "solaris_fixes.h":
pass
cdef extern from "complex.h":
cdef double creal(double_complex)
cdef double cimag(double_complex)
cdef double_complex _Complex_I
cdef inline double_complex CDE_to_dz(zz):
cdef ComplexDoubleElement z = <ComplexDoubleElement>(zz if isinstance(zz, ComplexDoubleElement) else CDF(zz))
return z._complex.dat[0] + _Complex_I * z._complex.dat[1]
cdef inline ComplexDoubleElement dz_to_CDE(double_complex dz):
cdef ComplexDoubleElement z = <ComplexDoubleElement>ComplexDoubleElement.__new__(ComplexDoubleElement)
z._complex.dat[0] = creal(dz)
z._complex.dat[1] = cimag(dz)
return z
cdef public bint cdf_py_call_helper(object fn,
int n_args,
double_complex* args, double_complex* retval) except 0:
py_args = []
cdef int i
for i from 0 <= i < n_args:
py_args.append(dz_to_CDE(args[i]))
py_result = fn(*py_args)
cdef ComplexDoubleElement result
if isinstance(py_result, ComplexDoubleElement):
result = <ComplexDoubleElement>py_result
else:
result = CDF(py_result)
retval[0] = CDE_to_dz(result)
return 1
"""[1:]
instrs = [
InstrSpec('load_arg', pg('A[D]', 'S'),
code='o0 = i0;'),
InstrSpec('load_const', pg('C[D]', 'S'),
code='o0 = i0;'),
InstrSpec('return', pg('S', ''),
code='return i0;'),
InstrSpec('py_call', pg('P[D]S@D', 'S'),
uses_error_handler=True,
code="""
if (!cdf_py_call_helper(i0, n_i1, i1, &o0)) {
goto error;
}
""")
]
for (name, op) in [('add', '+'), ('sub', '-'),
('mul', '*'), ('div', '/')]:
instrs.append(instr_infix(name, pg('SS', 'S'), op))
instrs.append(instr_funcall_2args('pow', pg('SS', 'S'), 'cpow'))
instrs.append(instr_funcall_2args('ipow', pg('SD', 'S'), 'cpow_int'))
for (name, op) in [('neg', '-i0'), ('invert', '1/i0'),
('abs', 'cabs(i0)')]:
instrs.append(instr_unary(name, pg('S', 'S'), op))
for name in ['sqrt', 'sin', 'cos', 'tan',
'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh', 'exp', 'log']:
instrs.append(instr_unary(name, pg('S', 'S'), "c%s(i0)" % name))
self.instr_descs = instrs
self._set_opcodes()
# supported for exponents that fit in an int
self.ipow_range = (int(-2**31), int(2**31-1))
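# Illustration only (hypothetical helper, not part of this module's API):
# a pure-Python paraphrase of the cpow_int() helper in the C header above.
# The generated interpreter always uses the C version; this sketch
# multiplies small powers out directly instead of reusing the squarings
# that the C code shares between cases.
def _sketch_cpow_int(z, exp):
    z = complex(z)
    if exp < 0:
        return 1 / _sketch_cpow_int(z, -exp)
    if exp <= 8:
        result = 1 + 0j
        for _ in range(exp):
            result *= z
        return result
    if z.imag == 0:
        # purely real base: stay real, as the C helper does
        return complex(z.real ** exp, 0)
    if z.real == 0:
        # purely imaginary base: i**exp cycles with period 4
        r = z.imag ** exp
        return (complex(r, 0), r * 1j, complex(-r, 0), -r * 1j)[exp % 4]
    return z ** exp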
class RRInterpreter(StackInterpreter):
r"""
A subclass of StackInterpreter, specifying an interpreter over
MPFR arbitrary-precision floating-point numbers.
"""
def __init__(self):
r"""
Initialize an RRInterpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RRInterpreter()
sage: interp.name
'rr'
sage: interp.mc_py_constants
{MC:py_constants}
sage: interp.chunks
[{MC:args}, {MC:retval}, {MC:constants}, {MC:py_constants}, {MC:stack}, {MC:code}, {MC:domain}]
sage: interp.pg('A[D]', 'S')
([({MC:args}, {MC:code}, None)], [({MC:stack}, None, None)])
sage: instrs = dict([(ins.name, ins) for ins in interp.instr_descs])
sage: instrs['add']
add: SS->S = 'mpfr_add(o0, i0, i1, MPFR_RNDN);'
sage: instrs['py_call']
py_call: *->S = '\nif (!rr_py_call_h...goto error;\n}\n'
That py_call instruction is particularly interesting, and
demonstrates a useful technique to let you use Cython code
in an interpreter. Let's look more closely::
sage: print(instrs['py_call'].code)
if (!rr_py_call_helper(domain, i0, n_i1, i1, o0)) {
goto error;
}
This instruction makes use of the function ``rr_py_call_helper``,
which is declared in ``wrapper_rr.h``::
sage: print(interp.c_header)
<BLANKLINE>
#include <mpfr.h>
#include "interpreters/wrapper_rr.h"
<BLANKLINE>
The function ``rr_py_call_helper`` is implemented in Cython::
sage: print(interp.pyx_header)
# distutils: libraries = mpfr gmp
<BLANKLINE>
cdef public bint rr_py_call_helper(object domain, object fn,
int n_args,
mpfr_t* args, mpfr_t retval) except 0:
py_args = []
cdef int i
cdef RealNumber rn
for i from 0 <= i < n_args:
rn = domain()
mpfr_set(rn.value, args[i], MPFR_RNDN)
py_args.append(rn)
cdef RealNumber result = domain(fn(*py_args))
mpfr_set(retval, result.value, MPFR_RNDN)
return 1
So instructions where you need to interact with Python can
call back into Cython code fairly easily.
"""
StackInterpreter.__init__(self, ty_mpfr, mc_retval= MemoryChunkRRRetval('retval', ty_mpfr))
self.name = 'rr'
self.err_return = '0'
self.mc_py_constants = MemoryChunkConstants('py_constants', ty_python)
self.mc_domain = MemoryChunkPyConstant('domain')
self.chunks = [self.mc_args, self.mc_retval, self.mc_constants,
self.mc_py_constants,
self.mc_stack, self.mc_code, self.mc_domain]
pg = params_gen(A=self.mc_args, C=self.mc_constants, D=self.mc_code,
S=self.mc_stack,
P=self.mc_py_constants)
self.pg = pg
self.c_header = '''
#include <mpfr.h>
#include "interpreters/wrapper_rr.h"
'''
self.pxd_header = """
from sage.rings.real_mpfr cimport RealField_class, RealNumber
from sage.libs.mpfr cimport *
"""
self.pyx_header = """# distutils: libraries = mpfr gmp
cdef public bint rr_py_call_helper(object domain, object fn,
int n_args,
mpfr_t* args, mpfr_t retval) except 0:
py_args = []
cdef int i
cdef RealNumber rn
for i from 0 <= i < n_args:
rn = domain()
mpfr_set(rn.value, args[i], MPFR_RNDN)
py_args.append(rn)
cdef RealNumber result = domain(fn(*py_args))
mpfr_set(retval, result.value, MPFR_RNDN)
return 1
"""
instrs = [
InstrSpec('load_arg', pg('A[D]', 'S'),
code='mpfr_set(o0, i0, MPFR_RNDN);'),
InstrSpec('load_const', pg('C[D]', 'S'),
code='mpfr_set(o0, i0, MPFR_RNDN);'),
InstrSpec('return', pg('S', ''),
code='mpfr_set(retval, i0, MPFR_RNDN);\nreturn 1;\n'),
InstrSpec('py_call', pg('P[D]S@D', 'S'),
uses_error_handler=True,
code="""
if (!rr_py_call_helper(domain, i0, n_i1, i1, o0)) {
goto error;
}
""")
]
for (name, op) in [('add', 'mpfr_add'), ('sub', 'mpfr_sub'),
('mul', 'mpfr_mul'), ('div', 'mpfr_div'),
('pow', 'mpfr_pow')]:
instrs.append(instr_funcall_2args_mpfr(name, pg('SS', 'S'), op))
instrs.append(instr_funcall_2args_mpfr('ipow', pg('SD', 'S'), 'mpfr_pow_si'))
for name in ['neg', 'abs',
'log', 'log2', 'log10',
'exp', 'exp2', 'exp10',
'cos', 'sin', 'tan',
'sec', 'csc', 'cot',
'acos', 'asin', 'atan',
'cosh', 'sinh', 'tanh',
'sech', 'csch', 'coth',
'acosh', 'asinh', 'atanh',
'log1p', 'expm1', 'eint',
'gamma', 'lngamma',
'zeta', 'erf', 'erfc',
'j0', 'j1', 'y0', 'y1']:
instrs.append(instr_funcall_1arg_mpfr(name, pg('S', 'S'), 'mpfr_' + name))
# mpfr_ui_div constructs a temporary mpfr_t and then calls mpfr_div;
# it would probably be (slightly) faster to use a permanent copy
# of "one" (on the other hand, the constructed temporary copy is
# on the stack, so it's very likely to be in the cache).
instrs.append(InstrSpec('invert', pg('S', 'S'),
code='mpfr_ui_div(o0, 1, i0, MPFR_RNDN);'))
self.instr_descs = instrs
self._set_opcodes()
# Supported for exponents that fit in a long, so we could use
# a much wider range on a 64-bit machine. On the other hand,
# it's easier to write the code this way, and constant integer
# exponents outside this range probably aren't very common anyway.
self.ipow_range = (int(-2**31), int(2**31-1))
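# Illustration only (hypothetical helper, not part of this module's API):
# the C signature of the MPFR interpreter can be read straight off the
# chunk list -- note the dedicated mpfr_t retval parameter and the
# PyObject* domain parameter consumed by rr_py_call_helper.
def _sketch_rr_parameter_list():
    return [ch.declare_parameter() for ch in RRInterpreter().chunks]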
class PythonInterpreter(StackInterpreter):
r"""
A subclass of StackInterpreter, specifying an interpreter over
Python objects.
Let's discuss how the reference-counting works in Python-object
based interpreters.
There is a simple rule to remember: when executing the code
snippets, the input variables contain borrowed references;
you must fill in the output variables with references you own.
As an optimization, an instruction may set .handles_own_decref; in
that case, it must decref any input variables that came from the
stack. (Input variables that came from arguments/constants chunks
must NOT be decref'ed!) In addition, with .handles_own_decref, if
any of your input variables are arbitrary-count, then you must
NULL out these variables as you decref them. (Use Py_CLEAR to do
this, unless you understand the documentation of Py_CLEAR and why
it's different than Py_XDECREF followed by assigning NULL.)
Note that as a tiny optimization, the interpreter always assumes
(and ensures) that empty parts of the stack contain NULL, so
it doesn't bother to Py_XDECREF before it pushes onto the stack.
"""
def __init__(self):
r"""
Initialize a PythonInterpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = PythonInterpreter()
sage: interp.name
'py'
sage: interp.mc_args
{MC:args}
sage: interp.chunks
[{MC:args}, {MC:constants}, {MC:stack}, {MC:code}]
sage: instrs = dict([(ins.name, ins) for ins in interp.instr_descs])
sage: instrs['add']
add: SS->S = 'o0 = PyNumber_Add(i0, i1);'
sage: instrs['py_call']
py_call: *->S = '\nPyObject *py_args...CREF(py_args);\n'
"""
StackInterpreter.__init__(self, ty_python)
self.name = 'py'
# StackInterpreter.__init__ gave us a MemoryChunkArguments.
# Override with MemoryChunkPythonArguments.
self.mc_args = MemoryChunkPythonArguments('args', ty_python)
self.chunks = [self.mc_args, self.mc_constants, self.mc_stack,
self.mc_code]
pg = params_gen(A=self.mc_args, C=self.mc_constants, D=self.mc_code,
S=self.mc_stack)
self.pg = pg
self.c_header = """
#define CHECK(x) (x != NULL)
"""
instrs = [
InstrSpec('load_arg', pg('A[D]', 'S'),
code='o0 = i0; Py_INCREF(o0);'),
InstrSpec('load_const', pg('C[D]', 'S'),
code='o0 = i0; Py_INCREF(o0);'),
InstrSpec('return', pg('S', ''),
code='return i0;',
handles_own_decref=True),
InstrSpec('py_call', pg('C[D]S@D', 'S'),
handles_own_decref=True,
code="""
PyObject *py_args = PyTuple_New(n_i1);
if (py_args == NULL) goto error;
int i;
for (i = 0; i < n_i1; i++) {
PyObject *arg = i1[i];
PyTuple_SET_ITEM(py_args, i, arg);
i1[i] = NULL;
}
o0 = PyObject_CallObject(i0, py_args);
Py_DECREF(py_args);
""")
]
for (name, op) in [('add', 'PyNumber_Add'),
('sub', 'PyNumber_Subtract'),
('mul', 'PyNumber_Multiply'),
('div', 'PyNumber_Divide')]:
instrs.append(instr_funcall_2args(name, pg('SS', 'S'), op))
instrs.append(InstrSpec('pow', pg('SS', 'S'),
code='o0 = PyNumber_Power(i0, i1, Py_None);'))
instrs.append(InstrSpec('ipow', pg('SC[D]', 'S'),
code='o0 = PyNumber_Power(i0, i1, Py_None);'))
for (name, op) in [('neg', 'PyNumber_Negative'),
('invert', 'PyNumber_Invert'),
('abs', 'PyNumber_Absolute')]:
instrs.append(instr_unary(name, pg('S', 'S'), '%s(i0)'%op))
self.instr_descs = instrs
self._set_opcodes()
# Always use ipow
self.ipow_range = True
# We don't yet support call_c for Python-object interpreters
# (the default implementation doesn't work, because of
# object vs. PyObject* confusion)
self.implement_call_c = False
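# Illustration only (hypothetical helper, not part of this module's API):
# which Python-object instructions opt into managing their own reference
# counting, following the rules in the PythonInterpreter docstring.
def _sketch_py_decref_handling():
    return [(d.name, d.handles_own_decref)
            for d in PythonInterpreter().instr_descs]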
class ElementInterpreter(PythonInterpreter):
r"""
A subclass of PythonInterpreter, specifying an interpreter over
Sage elements with a particular parent.
This is very similar to the PythonInterpreter, but after every
instruction, the result is checked to make sure it is actually an
element with the correct parent; if not, we attempt to convert it.
Uses the same instructions (with the same implementation) as
PythonInterpreter.
"""
def __init__(self):
r"""
Initialize an ElementInterpreter.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = ElementInterpreter()
sage: interp.name
'el'
sage: interp.mc_args
{MC:args}
sage: interp.chunks
[{MC:args}, {MC:constants}, {MC:stack}, {MC:domain}, {MC:code}]
sage: instrs = dict([(ins.name, ins) for ins in interp.instr_descs])
sage: instrs['add']
add: SS->S = 'o0 = PyNumber_Add(i0, i1);'
sage: instrs['py_call']
py_call: *->S = '\nPyObject *py_args...CREF(py_args);\n'
"""
PythonInterpreter.__init__(self)
self.name = 'el'
# PythonInterpreter.__init__ gave us a MemoryChunkPythonArguments.
# Override with MemoryChunkElementArguments.
self.mc_args = MemoryChunkElementArguments('args', ty_python)
self.mc_domain_info = MemoryChunkPyConstant('domain')
self.chunks = [self.mc_args, self.mc_constants, self.mc_stack,
self.mc_domain_info, self.mc_code]
self.c_header = """
#include "interpreters/wrapper_el.h"
#define CHECK(x) do_check(&(x), domain)
static inline int do_check(PyObject **x, PyObject *domain) {
if (*x == NULL) return 0;
PyObject *new_x = el_check_element(*x, domain);
Py_DECREF(*x);
*x = new_x;
if (*x == NULL) return 0;
return 1;
}
"""
self.pyx_header = """
from sage.structure.element cimport Element
cdef public object el_check_element(object v, parent):
cdef Element v_el
if isinstance(v, Element):
v_el = <Element>v
if v_el._parent is parent:
return v_el
return parent(v)
"""[1:]
class InterpreterGenerator(object):
r"""
This class takes an InterpreterSpec and generates the corresponding
C interpreter and Cython wrapper.
See the documentation for methods get_wrapper and get_interpreter
for more information.
"""
def __init__(self, spec):
r"""
Initialize an InterpreterGenerator.
INPUT:
- ``spec`` -- an InterpreterSpec
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: gen = InterpreterGenerator(interp)
sage: gen._spec is interp
True
sage: gen.uses_error_handler
False
"""
self._spec = spec
self.uses_error_handler = False
def gen_code(self, instr_desc, write):
r"""
Generates code for a single instruction.
INPUTS:
instr_desc -- an InstrSpec
write -- a Python callable
This function calls its write parameter successively with
strings; when these strings are concatenated, the result is
the code for the given instruction.
See the documentation for the get_interpreter method for more
information.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: gen = InterpreterGenerator(interp)
sage: from six.moves import cStringIO as StringIO
sage: buff = StringIO()
sage: instrs = dict([(ins.name, ins) for ins in interp.instr_descs])
sage: gen.gen_code(instrs['div'], buff.write)
sage: print(buff.getvalue())
case 8: /* div */
{
double i1 = *--stack;
double i0 = *--stack;
double o0;
o0 = i0 / i1;
*stack++ = o0;
}
break;
<BLANKLINE>
"""
d = instr_desc
w = write
s = self._spec
if d.uses_error_handler:
self.uses_error_handler = True
w(je("""
case {{ d.opcode }}: /* {{ d.name }} */
{
""", d=d))
# If the inputs to an instruction come from the stack,
# then we want to generate code for the inputs in reverse order:
# for instance, the divide instruction, which takes inputs A and B
# and generates A/B, needs to pop B off the stack first.
# On the other hand, if the inputs come from the constant pool,
# then we want to generate code for the inputs in normal order,
# because the addresses in the code stream will be in that order.
# We handle this by running through the inputs in two passes:
# first a forward pass, where we handle non-stack inputs
# (and lengths for stack inputs), and then a reverse pass,
# where we handle stack inputs.
for i in range(len(d.inputs)):
(ch, addr, input_len) = d.inputs[i]
chst = ch.storage_type
if addr is not None:
w(" int ai%d = %s;\n" % (i, string_of_addr(addr)))
if input_len is not None:
w(" int n_i%d = %s;\n" % (i, string_of_addr(input_len)))
if not ch.is_stack():
# Shouldn't hardcode 'code' here
if ch.name == 'code':
w(" %s i%d = %s;\n" % (chst.c_local_type(), i, string_of_addr(ch)))
elif input_len is not None:
w(" %s i%d = %s + ai%d;\n" %
(chst.c_ptr_type(), i, ch.name, i))
else:
w(" %s i%d = %s[ai%d];\n" %
(chst.c_local_type(), i, ch.name, i))
for i in reversed(range(len(d.inputs))):
(ch, addr, input_len) = d.inputs[i]
chst = ch.storage_type
if ch.is_stack():
if input_len is not None:
w(" %s -= n_i%d;\n" % (ch.name, i))
w(" %s i%d = %s;\n" % (chst.c_ptr_type(), i, ch.name))
else:
w(" %s i%d = *--%s;\n" % (chst.c_local_type(), i, ch.name))
if ch.is_python_refcounted_stack():
w(" *%s = NULL;\n" % ch.name)
for i in range(len(d.outputs)):
(ch, addr, output_len) = d.outputs[i]
chst = ch.storage_type
if addr is not None:
w(" int ao%d = %s;\n" % (i, string_of_addr(addr)))
if output_len is not None:
w(" int n_o%d = %s;\n" % (i, string_of_addr(output_len)))
if ch.is_stack():
w(" %s o%d = %s;\n" %
(chst.c_ptr_type(), i, ch.name))
w(" %s += n_o%d;\n" % (ch.name, i))
else:
w(" %s o%d = %s + ao%d;\n" %
(chst.c_ptr_type(), i, ch.name, i))
else:
if not chst.cheap_copies():
if ch.is_stack():
w(" %s o%d = *%s++;\n" %
(chst.c_local_type(), i, ch.name))
else:
w(" %s o%d = %s[ao%d];\n" %
(chst.c_local_type(), i, ch.name, i))
else:
w(" %s o%d;\n" % (chst.c_local_type(), i))
w(indent_lines(8, d.code.rstrip('\n') + '\n'))
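        # After the instruction body, release the Python references held by
        # refcounted stack inputs (unless the instruction handles its own
        # DECREFs); the output loop below then applies CHECK() to
        # Python-object outputs and writes back results for cheap-copy types.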
stack_offsets = defaultdict(int)
for i in range(len(d.inputs)):
(ch, addr, input_len) = d.inputs[i]
chst = ch.storage_type
if ch.is_python_refcounted_stack() and not d.handles_own_decref:
if input_len is None:
w(" Py_DECREF(i%d);\n" % i)
stack_offsets[ch] += 1
else:
w(je("""
int {{ iter }};
for ({{ iter }} = 0; {{ iter }} < n_i{{ i }}; {{ iter }}++) {
Py_CLEAR(i{{ i }}[{{ iter }}]);
}
""", iter='_interp_iter_%d' % i, i=i))
for i in range(len(d.outputs)):
ch = d.outputs[i][0]
chst = ch.storage_type
if chst.python_refcounted():
# We don't yet support code chunks
# that produce multiple Python values, because of
# the way it complicates error handling.
assert i == 0
w(" if (!CHECK(o%d)) {\n" % i)
w(" Py_XDECREF(o%d);\n" % i)
w(" goto error;\n")
w(" }\n")
self.uses_error_handler = True
if chst.cheap_copies():
if ch.is_stack():
w(" *%s++ = o%d;\n" % (ch.name, i))
else:
w(" %s[ao%d] = o%d;\n" % (ch.name, i, i))
w(je("""
}
break;
"""))
def func_header(self, cython=False):
r"""
Generates the function header for the declaration (in the Cython
wrapper) or the definition (in the C interpreter) of the interpreter
function.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = ElementInterpreter()
sage: gen = InterpreterGenerator(interp)
sage: print(gen.func_header())
PyObject* interp_el(PyObject** args,
PyObject** constants,
PyObject** stack,
PyObject* domain,
int* code)
sage: print(gen.func_header(cython=True))
object interp_el(PyObject** args,
PyObject** constants,
PyObject** stack,
PyObject* domain,
int* code)
"""
s = self._spec
ret_ty = 'bint' if cython else 'int'
if s.return_type:
ret_ty = s.return_type.c_decl_type()
if cython:
ret_ty = s.return_type.cython_decl_type()
return je("""{{ ret_ty }} interp_{{ s.name }}(
{%- for ch in s.chunks %}
{% if not loop.first %},
{% endif %}{{ ch.declare_parameter() }}
{%- endfor %})""", ret_ty=ret_ty, s=s)
def write_interpreter(self, write):
r"""
Generate the code for the C interpreter.
This function calls its write parameter successively with
strings; when these strings are concatenated, the result is
the code for the interpreter.
See the documentation for the get_interpreter method for more
information.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: gen = InterpreterGenerator(interp)
sage: from six.moves import cStringIO as StringIO
sage: buff = StringIO()
sage: gen.write_interpreter(buff.write)
sage: print(buff.getvalue())
/* Automatically generated by ...
"""
s = self._spec
w = write
w(je("""
/* {{ warn }} */
#include <Python.h>
{% print(s.c_header) %}
{{ myself.func_header() }} {
while (1) {
switch (*code++) {
""", s=s, myself=self, i=indent_lines, warn=autogen_warn))
for instr_desc in s.instr_descs:
self.gen_code(instr_desc, w)
w(je("""
}
}
{% if myself.uses_error_handler %}
error:
return {{ s.err_return }};
{% endif %}
}
""", s=s, i=indent_lines, myself=self))
def write_wrapper(self, write):
r"""
Generate the code for the Cython wrapper.
This function calls its write parameter successively with
strings; when these strings are concatenated, the result is
the code for the wrapper.
See the documentation for the get_wrapper method for more
information.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: gen = InterpreterGenerator(interp)
sage: from six.moves import cStringIO as StringIO
sage: buff = StringIO()
sage: gen.write_wrapper(buff.write)
sage: print(buff.getvalue())
# Automatically generated by ...
"""
s = self._spec
w = write
types = set()
do_cleanup = False
for ch in s.chunks:
if ch.storage_type is not None:
types.add(ch.storage_type)
do_cleanup = do_cleanup or ch.needs_cleanup_on_error()
for ch in s.chunks:
if ch.name == 'args':
arg_ch = ch
the_call = je("""
{% if s.return_type %}return {% endif -%}
{% if s.adjust_retval %}{{ s.adjust_retval }}({% endif %}
interp_{{ s.name }}({{ arg_ch.pass_argument() }}
{% for ch in s.chunks[1:] %}
, {{ ch.pass_argument() }}
{% endfor %}
){% if s.adjust_retval %}){% endif %}
""", s=s, arg_ch=arg_ch)
the_call_c = je("""
{% if s.return_type %}result[0] = {% endif %}
interp_{{ s.name }}(args
{% for ch in s.chunks[1:] %}
, {{ ch.pass_call_c_argument() }}
{% endfor %}
)
""", s=s, arg_ch=arg_ch)
w(je("""
# {{ warn }}
# distutils: sources = sage/ext/interpreters/interp_{{ s.name }}.c
{{ s.pyx_header }}
include "cysignals/memory.pxi"
from cpython.ref cimport PyObject
cdef extern from "Python.h":
void Py_DECREF(PyObject *o)
void Py_INCREF(PyObject *o)
void Py_CLEAR(PyObject *o)
object PyList_New(Py_ssize_t len)
ctypedef struct PyListObject:
PyObject **ob_item
ctypedef struct PyTupleObject:
PyObject **ob_item
from sage.ext.fast_callable cimport Wrapper
cdef extern:
{{ myself.func_header(cython=true) -}}
{% if s.err_return != 'NULL' %}
except? {{ s.err_return }}
{% endif %}
cdef class Wrapper_{{ s.name }}(Wrapper):
# attributes are declared in corresponding .pxd file
def __init__(self, args):
Wrapper.__init__(self, args, metadata)
cdef int i
cdef int count
{% for ty in types %}
{% print(indent_lines(8, ty.local_declarations)) %}
{% print(indent_lines(8, ty.class_member_initializations)) %}
{% endfor %}
{% for ch in s.chunks %}
{% print(ch.init_class_members()) %}
{% endfor %}
{% print(indent_lines(8, s.extra_members_initialize)) %}
def __dealloc__(self):
cdef int i
{% for ch in s.chunks %}
{% print(ch.dealloc_class_members()) %}
{% endfor %}
def __call__(self, *args):
if self._n_args != len(args): raise ValueError
{% for ty in types %}
{% print(indent_lines(8, ty.local_declarations)) %}
{% endfor %}
{% print(indent_lines(8, arg_ch.setup_args())) %}
{% for ch in s.chunks %}
{% print(ch.declare_call_locals()) %}
{% endfor %}
{% if do_cleanup %}
try:
{% print(indent_lines(4, the_call)) %}
except BaseException:
{% for ch in s.chunks %}
{% if ch.needs_cleanup_on_error() %}
{% print(indent_lines(12, ch.handle_cleanup())) %}
{% endif %}
{% endfor %}
raise
{% else %}
{% print(the_call) %}
{% endif %}
{% if not s.return_type %}
return retval
{% endif %}
{% if s.implement_call_c %}
cdef bint call_c(self,
{{ arg_ch.storage_type.c_ptr_type() }} args,
{{ arg_ch.storage_type.c_reference_type() }} result) except 0:
{% if do_cleanup %}
try:
{% print(indent_lines(4, the_call_c)) %}
except BaseException:
{% for ch in s.chunks %}
{% if ch.needs_cleanup_on_error() %}
{% print(indent_lines(12, ch.handle_cleanup())) %}
{% endif %}
{% endfor %}
raise
{% else %}
{% print(the_call_c) %}
{% endif %}
return 1
{% endif %}
from sage.ext.fast_callable import CompilerInstrSpec, InterpreterMetadata
metadata = InterpreterMetadata(by_opname={
{% for instr in s.instr_descs %}
'{{ instr.name }}':
(CompilerInstrSpec({{ instr.n_inputs }}, {{ instr.n_outputs }}, {{ instr.parameters }}), {{ instr.opcode }}),
{% endfor %}
},
by_opcode=[
{% for instr in s.instr_descs %}
('{{ instr.name }}',
CompilerInstrSpec({{ instr.n_inputs }}, {{ instr.n_outputs }}, {{ instr.parameters }})),
{% endfor %}
],
ipow_range={{ s.ipow_range }})
""", s=s, myself=self, types=types, arg_ch=arg_ch,
indent_lines=indent_lines, the_call=the_call,
the_call_c=the_call_c, do_cleanup=do_cleanup, warn=autogen_warn))
def write_pxd(self, write):
r"""
Generate the pxd file for the Cython wrapper.
This function calls its write parameter successively with
strings; when these strings are concatenated, the result is
the code for the pxd file.
See the documentation for the get_pxd method for more
information.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: interp = RDFInterpreter()
sage: gen = InterpreterGenerator(interp)
sage: from six.moves import cStringIO as StringIO
sage: buff = StringIO()
sage: gen.write_pxd(buff.write)
sage: print(buff.getvalue())
# Automatically generated by ...
"""
s = self._spec
w = write
types = set()
for ch in s.chunks:
if ch.storage_type is not None:
types.add(ch.storage_type)
for ch in s.chunks:
if ch.name == 'args':
arg_ch = ch
w(je("""
# {{ warn }}
from cpython cimport PyObject
from sage.ext.fast_callable cimport Wrapper
{% print(s.pxd_header) %}
cdef class Wrapper_{{ s.name }}(Wrapper):
{% for ty in types %}
{% print(indent_lines(4, ty.class_member_declarations)) %}
{% endfor %}
{% for ch in s.chunks %}
{% print(ch.declare_class_members()) %}
{% endfor %}
{% print(indent_lines(4, s.extra_class_members)) %}
{% if s.implement_call_c %}
cdef bint call_c(self,
{{ arg_ch.storage_type.c_ptr_type() }} args,
{{ arg_ch.storage_type.c_reference_type() }} result) except 0
{% endif %}
""", s=s, myself=self, types=types, indent_lines=indent_lines,
arg_ch=arg_ch, warn=autogen_warn))
def get_interpreter(self):
r"""
Return the code for the C interpreter.
EXAMPLES:
First we get the InterpreterSpec for several interpreters::
sage: from sage_setup.autogen.interpreters import *
sage: rdf_spec = RDFInterpreter()
sage: rr_spec = RRInterpreter()
sage: el_spec = ElementInterpreter()
Then we get the actual interpreter code::
sage: rdf_interp = InterpreterGenerator(rdf_spec).get_interpreter()
sage: rr_interp = InterpreterGenerator(rr_spec).get_interpreter()
sage: el_interp = InterpreterGenerator(el_spec).get_interpreter()
Now we can look through these interpreters.
Each interpreter starts with a file header; this can be
customized on a per-interpreter basis::
sage: print(rr_interp)
/* Automatically generated by ... */
...
Next is the function header, with one argument per memory chunk
in the interpreter spec::
sage: print(el_interp)
/* ... */ ...
PyObject* interp_el(PyObject** args,
PyObject** constants,
PyObject** stack,
PyObject* domain,
int* code) {
...
Currently, the interpreters have a very simple structure; just
grab the next instruction and execute it, in a switch
statement::
sage: print(rdf_interp)
/* ... */ ...
while (1) {
switch (*code++) {
...
Then comes the code for each instruction. Here is one of the
simplest instructions::
sage: print(rdf_interp)
/* ... */ ...
case 10: /* neg */
{
double i0 = *--stack;
double o0;
o0 = -i0;
*stack++ = o0;
}
break;
...
We simply pull the top of the stack into a variable, negate it,
and write the result back onto the stack.
Let's look at the MPFR-based version of this instruction.
This is an example of an interpreter with an auto-reference
type::
sage: print(rr_interp)
/* ... */ ...
case 10: /* neg */
{
mpfr_ptr i0 = *--stack;
mpfr_ptr o0 = *stack++;
mpfr_neg(o0, i0, MPFR_RNDN);
}
break;
...
Here we see that the input and output variables are actually
just pointers into the stack. But due to the auto-reference
        trick, the actual code snippet, ``mpfr_neg(o0, i0, MPFR_RNDN);``,
is exactly the same as if i0 and o0 were declared as local
mpfr_t variables.
For completeness, let's look at this instruction in the
Python-object element interpreter::
sage: print(el_interp)
/* ... */ ...
case 10: /* neg */
{
PyObject* i0 = *--stack;
*stack = NULL;
PyObject* o0;
o0 = PyNumber_Negative(i0);
Py_DECREF(i0);
if (!CHECK(o0)) {
Py_XDECREF(o0);
goto error;
}
*stack++ = o0;
}
break;
...
The original code snippet was only ``o0 = PyNumber_Negative(i0);``;
all the rest is automatically generated. For ElementInterpreter,
the CHECK macro actually checks for an exception (makes sure that
        o0 is not NULL), tests whether o0 is an element with the correct
        parent, and if not, converts it into the correct parent. (That is,
it can potentially modify the variable o0.)
"""
from six.moves import cStringIO as StringIO
buff = StringIO()
self.write_interpreter(buff.write)
return buff.getvalue()
def get_wrapper(self):
r"""
Return the code for the Cython wrapper.
EXAMPLES:
First we get the InterpreterSpec for several interpreters::
sage: from sage_setup.autogen.interpreters import *
sage: rdf_spec = RDFInterpreter()
sage: rr_spec = RRInterpreter()
sage: el_spec = ElementInterpreter()
Then we get the actual wrapper code::
sage: rdf_wrapper = InterpreterGenerator(rdf_spec).get_wrapper()
sage: rr_wrapper = InterpreterGenerator(rr_spec).get_wrapper()
sage: el_wrapper = InterpreterGenerator(el_spec).get_wrapper()
Now we can look through these wrappers.
Each wrapper starts with a file header; this can be
customized on a per-interpreter basis (some blank lines have been
elided below)::
sage: print(rdf_wrapper)
# Automatically generated by ...
include "cysignals/memory.pxi"
from cpython.ref cimport PyObject
cdef extern from "Python.h":
void Py_DECREF(PyObject *o)
void Py_INCREF(PyObject *o)
void Py_CLEAR(PyObject *o)
<BLANKLINE>
object PyList_New(Py_ssize_t len)
ctypedef struct PyListObject:
PyObject **ob_item
<BLANKLINE>
ctypedef struct PyTupleObject:
PyObject **ob_item
<BLANKLINE>
from sage.ext.fast_callable cimport Wrapper
...
We need a way to propagate exceptions back to the wrapper,
even though we only return a double from interp_rdf. The
``except? -1094648009105371`` (that's a randomly chosen
number) means that we will return that number if there's an
exception, but the wrapper still has to check whether that's a
legitimate return or an exception. (Cython does this
automatically.)
Next comes the actual wrapper class. The member declarations
are in the corresponding pxd file; see the documentation for
get_pxd to see them::
sage: print(rdf_wrapper)
# ...
cdef class Wrapper_rdf(Wrapper):
# attributes are declared in corresponding .pxd file
...
Next is the __init__ method, which starts like this::
sage: print(rdf_wrapper)
# ...
def __init__(self, args):
Wrapper.__init__(self, args, metadata)
cdef int i
cdef int count
...
To make it possible to generate code for all expression
interpreters with a single code generator, all wrappers
have the same API. The __init__ method takes a single
argument (here called *args*), which is a dictionary holding
all the information needed to initialize this wrapper.
We call Wrapper.__init__, which saves a copy of this arguments
object and of the interpreter metadata in the wrapper. (This is
only used for debugging.)
Now we allocate memory for each memory chunk. (We allocate
the memory here, and reuse it on each call of the
wrapper/interpreter. This is for speed reasons; in a fast
interpreter like RDFInterpreter, there are no memory allocations
involved in a call of the wrapper, except for the ones that
are required by the Python calling convention. Eventually
we will support alternate Cython-only entry points that do
absolutely no memory allocation.)
Basically the same code is repeated, with minor variations, for
each memory chunk; for brevity, we'll only show the code
for 'constants'::
sage: print(rdf_wrapper)
# ...
val = args['constants']
self._n_constants = len(val)
self._constants = <double*>sig_malloc(sizeof(double) * len(val))
if self._constants == NULL: raise MemoryError
for i in range(len(val)):
self._constants[i] = val[i]
...
Recall that _n_constants is an int, and _constants is a
double*.
The RRInterpreter version is more complicated, because it has to
call mpfr_init::
sage: print(rr_wrapper)
# ...
cdef RealNumber rn
...
val = args['constants']
self._n_constants = len(val)
self._constants = <mpfr_t*>sig_malloc(sizeof(mpfr_t) * len(val))
if self._constants == NULL: raise MemoryError
for i in range(len(val)):
mpfr_init2(self._constants[i], self.domain.prec())
for i in range(len(val)):
rn = self.domain(val[i])
mpfr_set(self._constants[i], rn.value, MPFR_RNDN)
...
And as described in the documentation for get_pxd, in
Python-object based interpreters we actually allocate the
memory as a Python list::
sage: print(el_wrapper)
# ...
val = args['constants']
self._n_constants = len(val)
self._list_constants = PyList_New(self._n_constants)
self._constants = (<PyListObject *>self._list_constants).ob_item
for i in range(len(val)):
self._constants[i] = <PyObject *>val[i]; Py_INCREF(self._constants[i])
...
Of course, once we've allocated the memory, we eventually have
to free it. (Again, we'll only look at 'constants'.)::
sage: print(rdf_wrapper)
# ...
def __dealloc__(self):
...
if self._constants:
sig_free(self._constants)
...
The RRInterpreter code is more complicated again because it has
to call mpfr_clear::
sage: print(rr_wrapper)
# ...
def __dealloc__(self):
cdef int i
...
if self._constants:
for i in range(self._n_constants):
mpfr_clear(self._constants[i])
sig_free(self._constants)
...
But the ElementInterpreter code is extremely simple --
it doesn't have to do anything to deallocate constants!
(Since the memory for constants is actually allocated as a
Python list, and Cython knows how to deallocate Python lists.)
Finally we get to the __call__ method. We grab the arguments
passed by the caller, stuff them in our pre-allocated
argument array, and then call the C interpreter.
We optionally adjust the return value of the interpreter
(currently only the RDF/float interpreter performs this step;
        this is the only place where domain=RDF differs from
domain=float)::
sage: print(rdf_wrapper)
# ...
def __call__(self, *args):
if self._n_args != len(args): raise ValueError
cdef double* c_args = self._args
cdef int i
for i from 0 <= i < len(args):
self._args[i] = args[i]
return self._domain(interp_rdf(c_args
, self._constants
, self._py_constants
, self._stack
, self._code
))
...
In Python-object based interpreters, the call to the C
interpreter has to be a little more complicated. We don't
want to hold on to Python objects from an old computation by
leaving them referenced from the stack. In normal operation,
the C interpreter clears out the stack as it runs, leaving the
stack totally clear when the interpreter finishes. However,
this doesn't happen if the C interpreter raises an exception.
In that case, we have to clear out any remnants from the stack
in the wrapper::
sage: print(el_wrapper)
# ...
try:
return interp_el((<PyListObject*>mapped_args).ob_item
, self._constants
, self._stack
, <PyObject*>self._domain
, self._code
)
except BaseException:
for i in range(self._n_stack):
Py_CLEAR(self._stack[i])
raise
...
Finally, we define a cdef call_c method, for quickly calling
this object from Cython. (The method is omitted from
Python-object based interpreters.)::
sage: print(rdf_wrapper)
# ...
cdef bint call_c(self,
double* args,
double* result) except 0:
result[0] = interp_rdf(args
, self._constants
, self._py_constants
, self._stack
, self._code
)
return 1
...
The method for the RR interpreter is slightly different, because
the interpreter takes a pointer to a result location instead of
returning the value::
sage: print(rr_wrapper)
# ...
cdef bint call_c(self,
mpfr_t* args,
mpfr_t result) except 0:
interp_rr(args
, result
, self._constants
, self._py_constants
, self._stack
, self._code
, <PyObject*>self._domain
)
return 1
...
That's it for the wrapper class. The only thing remaining is
the interpreter metadata. This is the information necessary
for the code generator to map instruction names to opcodes; it
also gives information about stack usage, etc. This is fully
documented at InterpreterMetadata; for now, we'll just show
what it looks like.
Currently, there are three parts to the metadata; the first maps
instruction names to instruction descriptions. The second one
maps opcodes to instruction descriptions. Note that we don't
use InstrSpec objects here; instead, we use CompilerInstrSpec
objects, which are much simpler and contain only the information
we'll need at runtime. The third part says what range the
ipow instruction is defined over.
First the part that maps instruction names to
(CompilerInstrSpec, opcode) pairs::
sage: print(rdf_wrapper)
# ...
from sage.ext.fast_callable import CompilerInstrSpec, InterpreterMetadata
metadata = InterpreterMetadata(by_opname={
...
'return':
(CompilerInstrSpec(1, 0, []), 2),
'py_call':
(CompilerInstrSpec(0, 1, ['py_constants', 'n_inputs']), 3),
'pow':
(CompilerInstrSpec(2, 1, []), 4),
'add':
(CompilerInstrSpec(2, 1, []), 5),
...
}, ...)
There's also a table that maps opcodes to (instruction name,
CompilerInstrSpec) pairs::
sage: print(rdf_wrapper)
# ...
metadata = InterpreterMetadata(..., by_opcode=[
...
('return',
CompilerInstrSpec(1, 0, [])),
('py_call',
CompilerInstrSpec(0, 1, ['py_constants', 'n_inputs'])),
('pow',
CompilerInstrSpec(2, 1, [])),
('add',
CompilerInstrSpec(2, 1, [])),
...
], ...)
And then the ipow range::
sage: print(rdf_wrapper)
# ...
metadata = InterpreterMetadata(...,
ipow_range=(-2147483648, 2147483647))
And that's it for the wrapper.
"""
from six.moves import cStringIO as StringIO
buff = StringIO()
self.write_wrapper(buff.write)
return buff.getvalue()
def get_pxd(self):
r"""
Return the code for the Cython .pxd file.
EXAMPLES:
First we get the InterpreterSpec for several interpreters::
sage: from sage_setup.autogen.interpreters import *
sage: rdf_spec = RDFInterpreter()
sage: rr_spec = RRInterpreter()
sage: el_spec = ElementInterpreter()
Then we get the corresponding .pxd::
sage: rdf_pxd = InterpreterGenerator(rdf_spec).get_pxd()
sage: rr_pxd = InterpreterGenerator(rr_spec).get_pxd()
sage: el_pxd = InterpreterGenerator(el_spec).get_pxd()
Now we can look through these pxd files.
Each .pxd starts with a file header; this can be
customized on a per-interpreter basis (some blank lines have been
elided below)::
sage: print(rdf_pxd)
# Automatically generated by ...
from cpython cimport PyObject
from sage.ext.fast_callable cimport Wrapper
...
sage: print(rr_pxd)
# ...
from sage.rings.real_mpfr cimport RealField_class, RealNumber
from sage.libs.mpfr cimport *
...
Next and last is the declaration of the wrapper class, which
starts off with a list of member declarations::
sage: print(rdf_pxd)
# ...
cdef class Wrapper_rdf(Wrapper):
cdef int _n_args
cdef double* _args
cdef int _n_constants
cdef double* _constants
cdef object _list_py_constants
cdef int _n_py_constants
cdef PyObject** _py_constants
cdef int _n_stack
cdef double* _stack
cdef int _n_code
cdef int* _code
...
Contrast the declaration of ``_stack`` here with the
ElementInterpreter version. To simplify our handling of
reference counting and garbage collection, in a Python-object
based interpreter, we allocate arrays as Python lists,
and then pull the array out of the innards of the list::
sage: print(el_pxd)
# ...
cdef object _list_stack
cdef int _n_stack
cdef PyObject** _stack
...
Then, at the end of the wrapper class, we declare a cdef method
for quickly calling the wrapper object from Cython. (This method
is omitted from Python-object based interpreters.)::
sage: print(rdf_pxd)
# ...
cdef bint call_c(self,
double* args,
double* result) except 0
sage: print(rr_pxd)
# ...
cdef bint call_c(self,
mpfr_t* args,
mpfr_t result) except 0
"""
from six.moves import cStringIO as StringIO
buff = StringIO()
self.write_pxd(buff.write)
return buff.getvalue()
def write_if_changed(fn, value):
r"""
    Write value to the file named fn, if value is different from
the current contents.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: def last_modification(fn): return os.stat(fn).st_mtime
sage: fn = tmp_filename('gen_interp')
sage: write_if_changed(fn, 'Hello, world')
sage: t1 = last_modification(fn)
sage: open(fn).read()
'Hello, world'
sage: sleep(2) # long time
sage: write_if_changed(fn, 'Goodbye, world')
sage: t2 = last_modification(fn)
sage: open(fn).read()
'Goodbye, world'
sage: sleep(2) # long time
sage: write_if_changed(fn, 'Goodbye, world')
sage: t3 = last_modification(fn)
sage: open(fn).read()
'Goodbye, world'
sage: t1 == t2 # long time
False
sage: t2 == t3
True
"""
old_value = None
try:
with open(fn) as file:
old_value = file.read()
except IOError:
pass
if value != old_value:
# We try to remove the file, in case it exists. This is to
# automatically break hardlinks... see #5350 for motivation.
try:
os.remove(fn)
except OSError:
pass
with open(fn, 'w') as file:
file.write(value)
def build_interp(interp_spec, dir):
r"""
Given an InterpreterSpec, write the C interpreter and the Cython
wrapper (generate a pyx and a pxd file).
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: testdir = tmp_dir()
sage: rdf_interp = RDFInterpreter()
sage: build_interp(rdf_interp, testdir)
sage: open(testdir + '/interp_rdf.c').readline()
'/* Automatically generated by ... */\n'
"""
ig = InterpreterGenerator(interp_spec)
interp_fn = '%s/interp_%s.c' % (dir, interp_spec.name)
header_fn = '%s/interp_%s.h' % (dir, interp_spec.name)
wrapper_fn = '%s/wrapper_%s.pyx' % (dir, interp_spec.name)
pxd_fn = '%s/wrapper_%s.pxd' % (dir, interp_spec.name)
interp = ig.get_interpreter()
wrapper = ig.get_wrapper()
pxd = ig.get_pxd()
write_if_changed(interp_fn, interp)
write_if_changed(wrapper_fn, wrapper)
write_if_changed(pxd_fn, pxd)
def rebuild(dir):
r"""
Check whether the interpreter and wrapper sources have been written
since the last time this module was changed. If not, write them.
EXAMPLES::
sage: from sage_setup.autogen.interpreters import *
sage: testdir = tmp_dir()
sage: rebuild(testdir)
Building interpreters for fast_callable
sage: open(testdir + '/wrapper_el.pyx').readline()
'# Automatically generated by ...\n'
"""
# This line will show up in "sage -b" (once per upgrade, not every time
# you run it).
print("Building interpreters for fast_callable")
try:
os.makedirs(dir)
except OSError:
pass
interp = RDFInterpreter()
build_interp(interp, dir)
interp = CDFInterpreter()
build_interp(interp, dir)
interp = RRInterpreter()
build_interp(interp, dir)
interp = PythonInterpreter()
build_interp(interp, dir)
interp = ElementInterpreter()
build_interp(interp, dir)
with open(os.path.join(dir, '__init__.py'), 'w') as f:
f.write("# " + autogen_warn)
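# A minimal usage sketch (an assumption, not something this module runs
# itself): the generated sources are typically produced by calling
#
#     from sage_setup.autogen.interpreters import rebuild
#     rebuild('sage/ext/interpreters')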
|
py | 1a4bcc2fb36ff036cc6b43fa4262de2e71eb6d39 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import unittest
from healthcheck import EnvironmentDump
class BasicEnvironmentDumpTest(unittest.TestCase):
def test_basic_check(self):
def custom_section():
return "My custom section"
ed = EnvironmentDump()
ed.add_section("custom_section", custom_section)
message, status, headers = ed.run()
jr = json.loads(message)
self.assertEqual("My custom section", jr["custom_section"])
def test_custom_section_signature(self):
def custom_section():
return "My custom section"
ed = EnvironmentDump(custom_section=custom_section)
message, status, headers = ed.run()
jr = json.loads(message)
self.assertEqual("My custom section", jr["custom_section"])
if __name__ == '__main__':
unittest.main()
|
py | 1a4bcd00e7c685eff12427fd3160b6e701d2c969 | import os
from . import configs
from flask import Flask
from flask_cors import CORS
from flask_redis import FlaskRedis
import psycopg2
redis_store = FlaskRedis()
root_dir = os.path.dirname(os.path.abspath(__file__))
conn = psycopg2.connect(
    database=os.getenv("DB_NAME"),
    user=os.getenv("DB_USER"),
    password=os.getenv("DB_PASSWORD"),
    sslmode=os.getenv("DB_SSL"),
    port=os.getenv("DB_PORT"),
    host=os.getenv("DB_HOST")
)
conn.set_session(autocommit=True)
db = conn.cursor()
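# Note: the connection and cursor above are created once at import time and
# shared by everything that imports this package; with autocommit enabled,
# each execute() is committed immediately, so no explicit conn.commit() is
# needed.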
def create_app():
app = Flask(__name__)
app.config.from_object(configs.Config)
app.config['PROPAGATE_EXCEPTIONS'] = True
redis_store.init_app(app)
CORS(app, resources={r"/api/*": {"origins": "*"}})
from .controllers import api_blueprint
app.register_blueprint(api_blueprint)
return app |
py | 1a4bcd0ff4de0d38bc10ce176dd955239cebb6a0 | # Please complete the TODO items in this code
import asyncio
import json
import requests
KAFKA_CONNECT_URL = "http://localhost:8083/connectors"
CONNECTOR_NAME = "clicks-jdbc"  # must match the "name" used in the config below
def configure_connector():
"""Calls Kafka Connect to create the Connector"""
print("creating or updating kafka connect connector...")
rest_method = requests.post
resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
if resp.status_code == 200:
return
#
# TODO: Complete the Kafka Connect Config below for a JDBC source connector.
# You should whitelist the `clicks` table, use incrementing mode and the
# incrementing column name should be id.
#
# See: https://docs.confluent.io/current/connect/references/restapi.html
# See: https://docs.confluent.io/current/connect/kafka-connect-jdbc/source-connector/source_config_options.html
#
resp = rest_method(
KAFKA_CONNECT_URL,
headers={"Content-Type": "application/json"},
data=json.dumps(
{
"name": "clicks-jdbc", # TODO
"config": {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector", # TODO
"topic.prefix": "exercise3-", # TODO
"mode": "incrementing", # TODO
"incrementing.column.name": "id", # TODO
"table.whitelist": "clicks", # TODO
"tasks.max": 1,
"connection.url": "jdbc:postgresql://localhost:5432/classroom",
"connection.user": "root",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
},
}
),
)
# psql classroom
# kafka-topics --list --zookeeper localhost:2181
# kafka-console-consumer --topic exercise3-clicks --bootstrap-server localhost:9092 --from-beginning
# tail -f -n 10 /var/log/journal/confluent-kafka-connect.service.log
# https://www.postgresqltutorial.com/postgresql-administration/psql-commands/
# Ensure a healthy response was given
try:
resp.raise_for_status()
except:
print(f"failed creating connector: {json.dumps(resp.json(), indent=2)}")
exit(1)
print("connector created successfully.")
print("Use kafka-console-consumer and kafka-topics to see data!")
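    # Cleanup tip (not part of the original exercise): to re-run with different
    # settings, delete the connector first via the Kafka Connect REST API, e.g.
    #   curl -X DELETE http://localhost:8083/connectors/<connector-name>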
if __name__ == "__main__":
configure_connector()
|
py | 1a4bcd3aa228e056407b8365bd1b1a2d5eadbf83 | """yomdb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from accounts.views import index
urlpatterns = [
path('admin/', admin.site.urls),
path('', index, name="index"),
path('accounts/', include('accounts.urls')),
path('watchlist/', include('watchlist.urls')),
]
|
py | 1a4bcd3bd5c800e4b026b9f3ce14f76225d25612 | #MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram import Client, filters
HOME_TEXT = "<b>Hello, [{}](tg://user?id={})\n\nI am MusicPlayer 2.0, which plays music in Channels and Groups 24*7\n\nI can even stream YouTube Live in your VoiceChat\n\nDeploy your own bot from the source code below\n\nHit /help to know about the available commands.</b>"
HELP = """
<b>Add the bot and the User account to your Group with admin rights.
Start a VoiceChat.
Use /play <song name> or use /play as a reply to an audio file or YouTube link.
You can also use /dplay <song name> to play a song from Deezer.</b>
**Common Commands**:
**/play** Reply to an audio file or YouTube link to play it or use /play <song name>.
**/dplay** Play music from Deezer, Use /dplay <song name>
**/player** Show current playing song.
**/help** Show help for commands
**/playlist** Shows the playlist.
**Admin Commands**:
**/skip** [n] ... Skip current or n where n >= 2
**/join** Join voice chat.
**/leave** Leave current voice chat
**/vc** Check which VC is joined.
**/stop** Stop playing.
**/radio** Start Radio.
**/stopradio** Stops Radio Stream.
**/replay** Play from the beginning.
**/clean** Remove unused RAW PCM files.
**/pause** Pause playing.
**/resume** Resume playing.
**/mute** Mute in VC.
**/unmute** Unmute in VC.
**/restart** Restarts the Bot.
"""
@Client.on_message(filters.command('start'))
async def start(client, message):
buttons = [
[
InlineKeyboardButton('⚙️ Update Channel', url='https://t.me/musi_c_world'),
InlineKeyboardButton('🤖 Other Bots', url='https://t.me/Soulsharper'),
],
[
InlineKeyboardButton('👨🏼💻 Developer', url='https://t.me/Soulsharper'),
InlineKeyboardButton('🧩 Source', url='https://github.com/subinps/MusicPlayer'),
],
[
InlineKeyboardButton('👨🏼🦯 Help', callback_data='help'),
]
]
reply_markup = InlineKeyboardMarkup(buttons)
await message.reply(HOME_TEXT.format(message.from_user.first_name, message.from_user.id), reply_markup=reply_markup)
@Client.on_message(filters.command("help"))
async def show_help(client, message):
buttons = [
[
InlineKeyboardButton('⚙️ Update Channel', url='https://t.me/musi_c_world'),
InlineKeyboardButton('🤖 Other Bots', url='https://t.me/musi_c_world'),
],
[
InlineKeyboardButton('👨🏼💻 Developer', url='https://t.me/Soulsharper'),
InlineKeyboardButton('🧩 Source', url='https://github.com/subinps/MusicPlayer'),
]
]
reply_markup = InlineKeyboardMarkup(buttons)
await message.reply_text(
HELP,
reply_markup=reply_markup
)
|
py | 1a4bcd454613477c65baa958c4793bd5376e3fa0 | #
# Copyright (c) Pret-a-3D/Paolo Ciccone. All rights reserved.
# Modified by Fuzzy70/Lee Furssedonn with kind permission from Paolo Ciccone
#
from Reality_services import *
from Reality import *
# To customize this script all you need to do is to
# change the following variable
Re_sIBL_Map = ":Runtime:Textures:Reality:SingleIBLs:RingSolid:RingSolid_Large_20.ibl"
# Set the IBL Map
Reality.Scene().setIBLImage(ReResolvePoserPath(Re_sIBL_Map).encode("utf8"))
|
py | 1a4bce89532a7cd9c6c9f5ef3d842398cbdd3fd5 | import os
from math import isclose
from typing import Union
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, recall_score, f1_score, fbeta_score, cohen_kappa_score, \
mean_squared_error, mean_absolute_error, roc_auc_score, average_precision_score
from oolearning import *
from oolearning.model_wrappers.ModelExceptions import AlreadyExecutedError
from tests.TestHelper import TestHelper
from tests.TimerTestCase import TimerTestCase
# noinspection PyMethodMayBeStatic
class ScoreTests(TimerTestCase):
@classmethod
def setUpClass(cls):
pass
def test_ScoreMediator(self):
######################################################################################################
# test ScoreMediator with a ScoreActualPredictedBase object
######################################################################################################
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa
predictions_mock = mock_data.drop(columns=['actual', 'predictions'])
predictions_mock.columns = [1, 0]
score = KappaScore(converter=TwoClassThresholdConverter(threshold=0.41, positive_class=1))
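        # predictions_mock holds per-class probability columns (named 1 and 0);
        # the TwoClassThresholdConverter turns them into class labels at the
        # 0.41 threshold before the Kappa statistic is computed.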
# check that both the score is returned from the Mediator and the score object has the `value` set
accuracy = ScoreMediator.calculate(score,
data_x=None,
actual_target_variables=mock_data.actual,
predicted_values=predictions_mock)
assert isclose(accuracy, 0.37990215607221967) # check the score is returned
assert isclose(score.value, 0.37990215607221967) # check the score object's `value` is set
######################################################################################################
# test ScoreMediator with a ScoreClusteringBase object
######################################################################################################
data = TestHelper.get_iris_data()
cluster_data = data.drop(columns='species')
trainer = ModelTrainer(model=ClusteringHierarchical(),
model_transformations=[NormalizationVectorSpaceTransformer()],
scores=[SilhouetteScore()])
clusters = trainer.train_predict_eval(data=cluster_data,
hyper_params=ClusteringHierarchicalHP(num_clusters=3))
score = SilhouetteScore()
assert score.name == Metric.SILHOUETTE.value
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreClusteringBase)
accuracy = ScoreMediator.calculate(score,
# NOTE: we have to pass in the TRANSFORMED data
data_x=NormalizationVectorSpaceTransformer().fit_transform(cluster_data), # noqa
actual_target_variables=None,
predicted_values=clusters)
assert isclose(accuracy, 0.5562322357473719) # check the score is returned
assert isclose(score.value, 0.5562322357473719) # check the score object's `value` is set
######################################################################################################
# test ScoreMediator with unsupported ScoreBaseObject
######################################################################################################
class MockScore(ScoreBase):
def _better_than(self, this: float, other: float) -> bool:
pass
def _calculate(self, *args) -> float:
pass
# noinspection PyPropertyDefinition
@property
def name(self) -> str:
pass
self.assertRaises(ValueError,
lambda: ScoreMediator.calculate(MockScore(),
data_x=None,
actual_target_variables=None,
predicted_values=None))
def test_BaseClass(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
rmse_eval = RmseScore()
accuracy = rmse_eval.calculate(actual_values=actual, predicted_values=predicted)
assert isclose(accuracy, 2.9154759474226504)
assert isclose(rmse_eval.value, 2.9154759474226504)
# should not be able to call calculate twice on same object (could indicate some sort of reuse error)
self.assertRaises(AlreadyExecutedError,
lambda: rmse_eval.calculate(actual_values=actual, predicted_values=predicted))
assert isinstance(rmse_eval, CostFunctionMixin)
def test_RmseScore(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
rmse_eval = RmseScore()
assert isinstance(rmse_eval, CostFunctionMixin)
assert isinstance(rmse_eval, ScoreBase)
assert rmse_eval.name == Metric.ROOT_MEAN_SQUARE_ERROR.value
rmse_eval.calculate(actual_values=actual, predicted_values=predicted)
assert isclose(np.sqrt(mean_squared_error(y_true=actual, y_pred=predicted)), rmse_eval.value)
######################################################################################################
# Test sorting
######################################################################################################
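        # RmseScore is a CostFunctionMixin, so sorting a list of scores puts the
        # lower (better) value first; utility-based scores such as KappaScore
        # sort the other way around, with the higher value first.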
rmse_other = RmseScore()
rmse_other.calculate(actual_values=actual - 1, predicted_values=predicted + 1) # create more spread
assert isclose(rmse_other.value, 3.5355339059327378) # "worse"
eval_list = [rmse_other, rmse_eval] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[3.5355339059327378, 2.9154759474226504])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[2.9154759474226504, 3.5355339059327378])])
def test_RmsleScore(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
expected_score = np.sqrt(np.mean((np.log(1+actual) - np.log(1+predicted))**2))
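        # RMSLE = sqrt(mean((log(1 + actual) - log(1 + predicted))^2)); the "+1"
        # keeps the log defined at zero and weights relative rather than
        # absolute differences.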
rmse_eval = RmsleScore()
assert isinstance(rmse_eval, CostFunctionMixin)
assert isinstance(rmse_eval, ScoreBase)
assert rmse_eval.name == Metric.ROOT_MEAN_SQUARE_LOGARITHMIC_ERROR.value
rmse_eval.calculate(actual_values=actual, predicted_values=predicted)
assert isclose(expected_score, rmse_eval.value)
######################################################################################################
# Test sorting
######################################################################################################
rmse_other = RmsleScore()
rmse_other.calculate(actual_values=actual - 1, predicted_values=predicted + 1) # create more spread
expected_other_score = 0.42204286369153776
assert isclose(rmse_other.value, expected_other_score) # "worse"
eval_list = [rmse_other, rmse_eval] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[expected_other_score, expected_score])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[expected_score, expected_other_score])])
def test_MseScore(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
mse_eval = MseScore()
assert isinstance(mse_eval, CostFunctionMixin)
assert isinstance(mse_eval, ScoreBase)
assert mse_eval.name == Metric.MEAN_SQUARED_ERROR.value
score = mse_eval.calculate(actual_values=actual, predicted_values=predicted)
assert score == 8.5
assert isclose(score, RmseScore().calculate(actual_values=actual, predicted_values=predicted) ** 2)
######################################################################################################
# Test sorting
######################################################################################################
mse_other = MseScore()
mse_other.calculate(actual_values=actual - 1, predicted_values=predicted + 1) # create more spread
assert isclose(mse_other.value, 12.5) # "worse"
eval_list = [mse_other, mse_eval] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[12.5, 8.5])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[8.5, 12.5])])
def test_MspeScore(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
expected_score = float(np.mean(np.square((actual - predicted) / actual)))
mspe_eval = MspeScore()
assert isinstance(mspe_eval, CostFunctionMixin)
assert isinstance(mspe_eval, ScoreBase)
assert mspe_eval.name == Metric.MEAN_SQUARED_PERCENTAGE_ERROR.value
score = mspe_eval.calculate(actual_values=actual, predicted_values=predicted)
# need to round because of the small noise we get from adding a constant to help avoid divide-by-zero
assert isclose(score, expected_score)
######################################################################################################
# Test sorting
######################################################################################################
mspe_other = MspeScore()
mspe_other.calculate(actual_values=actual - 1, predicted_values=predicted + 1) # create more spread
assert isclose(mspe_other.value, 0.4770842603697943) # "worse"
eval_list = [mspe_other, mspe_eval] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.4770842603697943, 0.12248815407984363])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.12248815407984363, 0.4770842603697943])])
######################################################################################################
# make sure we don't get a divide by zero
######################################################################################################
predicted = np.array([0, 11, 0, 11, 0.1, 0, 7, 8, 11, 13, 0.5, 0])
actual = np.array([0, 11, 1, 16, 0, 0.3, 5, 13, 12, 13, 1, 0.24])
constant = 1
expected_score = float(np.mean(np.square(((actual + constant) - (predicted + constant)) / (actual + constant))))
mspe_eval = MspeScore(constant=constant)
score = mspe_eval.calculate(actual_values=actual, predicted_values=predicted)
assert isclose(expected_score, score)
def test_R_Squared_Score(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
r2_eval = RSquaredScore()
assert isinstance(r2_eval, UtilityFunctionMixin)
assert isinstance(r2_eval, ScoreBase)
assert r2_eval.name == Metric.R_SQUARED.value
r2_eval.calculate(actual_values=actual, predicted_values=predicted)
assert isclose(0.41714285714285715, r2_eval.value)
######################################################################################################
# Test sorting
######################################################################################################
r2_worse = RSquaredScore()
r2_worse.calculate(actual_values=actual - 1, predicted_values=predicted + 1) # create more spread
assert isclose(r2_worse.value, 0.1428571428571429) # "worse"
eval_list = [r2_worse, r2_eval] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.1428571428571429, 0.41714285714285715])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.41714285714285715, 0.1428571428571429])])
def test_MaeScore(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
mae_eval = MaeScore()
assert isinstance(mae_eval, CostFunctionMixin)
assert isinstance(mae_eval, ScoreBase)
assert mae_eval.name == Metric.MEAN_ABSOLUTE_ERROR.value
mae_eval.calculate(actual_values=actual, predicted_values=predicted)
assert isclose(mean_absolute_error(y_true=actual, y_pred=predicted), mae_eval.value)
######################################################################################################
# Test sorting
######################################################################################################
mae_other = MaeScore()
mae_other.calculate(actual_values=actual - 1, predicted_values=predicted + 1) # create more spread
assert isclose(mae_other.value, 3.1666666666666665) # "worse"
eval_list = [mae_other, mae_eval] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[3.1666666666666665, 2.3333333333333335])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[2.3333333333333335, 3.1666666666666665])])
def test_MapeScore(self):
predicted = np.array([7, 10, 12, 10, 10, 8, 7, 8, 11, 13, 10, 8])
actual = np.array([6, 10, 14, 16, 7, 5, 5, 13, 12, 13, 8, 5])
expected_score = float(np.mean(np.abs((actual - predicted) / actual)))
mspe_eval = MapeScore()
assert isinstance(mspe_eval, CostFunctionMixin)
assert isinstance(mspe_eval, ScoreBase)
assert mspe_eval.name == Metric.MEAN_ABSOLUTE_PERCENTAGE_ERROR.value
score = mspe_eval.calculate(actual_values=actual, predicted_values=predicted)
# need to round because of the small noise we get from adding a constant to help avoid divide-by-zero
assert isclose(score, expected_score)
######################################################################################################
# Test sorting
######################################################################################################
mspe_other = MapeScore()
mspe_other.calculate(actual_values=actual - 1, predicted_values=predicted + 1) # create more spread
assert isclose(mspe_other.value, 0.5417688792688793) # "worse"
eval_list = [mspe_other, mspe_eval] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.5417688792688793, 0.2859203296703297])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.2859203296703297, 0.5417688792688793])])
######################################################################################################
# make sure we don't get a divide by zero
######################################################################################################
predicted = np.array([0, 11, 0, 11, 0.1, 0, 7, 8, 11, 13, 0.5, 0])
actual = np.array([0, 11, 1, 16, 0, 0.3, 5, 13, 12, 13, 1, 0.24])
constant = 1
expected_score = float(np.mean(np.abs(((actual + constant) - (predicted + constant)) / (actual + constant))))
mspe_eval = MapeScore(constant=constant)
score = mspe_eval.calculate(actual_values=actual, predicted_values=predicted)
assert isclose(expected_score, score)
def test_AucROCScore(self):
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa
predictions_mock = mock_data.drop(columns=['actual', 'predictions'])
predictions_mock.columns = [1, 0]
score = AucRocScore(positive_class=1)
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(score.value, roc_auc_score(y_true=mock_data.actual, y_score=mock_data.pos_probabilities)) # noqa
######################################################################################################
# Test sorting
######################################################################################################
        # makes a 'worse' score
score_other = AucRocScore(positive_class=0)
score_other.calculate(actual_values=np.array([1 if x == 0 else 0 for x in mock_data.actual]),
predicted_values=predictions_mock)
assert isclose(score_other.value, roc_auc_score(y_true=mock_data.actual, y_score=mock_data.neg_probabilities)) # noqa
score_list = [score_other, score] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in score_list],
[0.25571324007807417, 0.74428675992192583])])
score_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in score_list],
[0.74428675992192583, 0.25571324007807417])])
def test_AucPrecisionRecallScore(self):
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa
predictions_mock = mock_data.drop(columns=['actual', 'predictions'])
predictions_mock.columns = [1, 0]
score = AucPrecisionRecallScore(positive_class=1)
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(score.value, average_precision_score(y_true=mock_data.actual, y_score=mock_data.pos_probabilities)) # noqa
######################################################################################################
# Test sorting
######################################################################################################
        # makes a 'worse' score
score_other = AucPrecisionRecallScore(positive_class=0)
score_other.calculate(actual_values=np.array([1 if x == 0 else 0 for x in mock_data.actual]),
predicted_values=predictions_mock)
assert isclose(score_other.value, average_precision_score(y_true=mock_data.actual, y_score=mock_data.neg_probabilities)) # noqa
score_list = [score_other, score] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in score_list],
[0.28581244853623045, 0.6659419996895501])])
score_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in score_list],
[0.6659419996895501, 0.28581244853623045])])
def test_KappaScore(self):
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa
predictions_mock = mock_data.drop(columns=['actual', 'predictions'])
predictions_mock.columns = [1, 0]
score = KappaScore(converter=TwoClassThresholdConverter(threshold=0.41, positive_class=1))
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(accuracy, 0.37990215607221967)
######################################################################################################
# Test sorting
######################################################################################################
# creates worse value
score_other = KappaScore(converter=HighestValueConverter()) # same as threshold of 0.5
# score_other = KappaScore(converter=TwoClassThresholdConverter(threshold=0.5))
score_other.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(score_other.value, cohen_kappa_score(y1=mock_data.actual, y2=mock_data.predictions))
eval_list = [score_other, score] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.34756903797404387, 0.37990215607221967])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.37990215607221967, 0.34756903797404387])])
def test_FBetaScore(self):
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa
predictions_mock = mock_data.drop(columns=['actual', 'predictions'])
predictions_mock.columns = [1, 0]
######################################################################################################
# F1 Score (i.e. Beta == 1)
######################################################################################################
score = F1Score(converter=TwoClassThresholdConverter(threshold=0.41, positive_class=1))
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(score.value, 0.6472491909385113)
######################################################################################################
# Test sorting
######################################################################################################
score_other = F1Score(converter=TwoClassThresholdConverter(threshold=0.5,
positive_class=1))
score_other.calculate(actual_values=mock_data.actual,
predicted_values=predictions_mock)
assert isclose(score_other.value, f1_score(y_true=mock_data.actual, y_pred=mock_data.predictions, pos_label=1)) # noqa
eval_list = [score_other, score] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.5802707930367504, 0.6472491909385113])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.6472491909385113, 0.5802707930367504])])
######################################################################################################
# FBeta Score (Beta == 0.5)
######################################################################################################
score_other = FBetaScore(converter=TwoClassThresholdConverter(threshold=0.5,
positive_class=1),
beta=0.5)
score_other.calculate(actual_values=mock_data.actual,
predicted_values=predictions_mock)
assert isclose(score_other.value,
fbeta_score(y_true=mock_data.actual,
y_pred=mock_data.predictions,
beta=0.5,
pos_label=1))
eval_list = [score_other, score] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.6260434056761269, 0.6472491909385113])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.6472491909385113, 0.6260434056761269])])
######################################################################################################
# FBeta Score (Beta == 1.5)
######################################################################################################
score_other = FBetaScore(converter=TwoClassThresholdConverter(threshold=0.5,
positive_class=1),
beta=1.5)
score_other.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(score_other.value,
fbeta_score(y_true=mock_data.actual,
y_pred=mock_data.predictions,
beta=1.5,
pos_label=1))
eval_list = [score_other, score] # "worse, better"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.5542922114837977, 0.6472491909385113])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.6472491909385113, 0.5542922114837977])])
def test_ErrorRate(self):
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa
predictions_mock = mock_data.drop(columns=['actual', 'predictions'])
predictions_mock.columns = [1, 0]
score = ErrorRateScore(converter=TwoClassThresholdConverter(threshold=0.41, positive_class=1))
assert isinstance(score, CostFunctionMixin)
assert isinstance(score, ScoreBase)
score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(score.value, 0.30532212885154064)
######################################################################################################
# Test sorting
######################################################################################################
score_other = ErrorRateScore(converter=TwoClassThresholdConverter(threshold=0.5,
positive_class=1))
score_other.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(score_other.value, 1 - 0.696078431372549)
eval_list = [score, score_other] # "worse, better"
# lower error is better
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.30532212885154064, 0.303921568627451])])
eval_list.sort() # "better, worse"
assert all([isclose(x, y) for x, y in zip([x.value for x in eval_list],
[0.303921568627451, 0.30532212885154064])])
def test_BaseValue_is_int_or_float(self):
# bug, where positive predictive value (or any score) returns 0 (e.g. from DummyClassifier)
# which is an int (but base class originally checked only for float)
class MockScore(ScoreActualPredictedBase):
@property
def name(self) -> str:
return 'test'
def _better_than(self, this: float, other: float) -> bool:
return False
def _calculate(self, actual_values: np.ndarray, predicted_values: Union[np.ndarray, pd.DataFrame]) -> float: # noqa
return 0
score = MockScore()
# ensure .calculate doesn't explode
# noinspection PyTypeChecker
score.calculate(actual_values=[], predicted_values=[])
def test_Misc_scores(self):
"""
        For example, these holdout_score_objects might already be tested in another class (e.g. Sensitivity is
        tested via TwoClassEvaluator), but we want to verify that we can instantiate and use them.
"""
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa
predictions_mock = mock_data.drop(columns=['actual', 'predictions'])
predictions_mock.columns = [1, 0]
######################################################################################################
score = SensitivityScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5))
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(accuracy, recall_score(y_true=mock_data.actual, y_pred=mock_data.predictions))
assert isclose(score.value, recall_score(y_true=mock_data.actual, y_pred=mock_data.predictions))
######################################################################################################
score = SpecificityScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5))
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(accuracy, 0.8183962264150944)
assert isclose(score.value, 0.8183962264150944)
######################################################################################################
score = PositivePredictiveValueScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5)) # noqa
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(accuracy, 0.6607929515418502)
assert isclose(score.value, 0.6607929515418502)
######################################################################################################
score = NegativePredictiveValueScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5)) # noqa
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(accuracy, 0.7125256673511293)
assert isclose(score.value, 0.7125256673511293)
######################################################################################################
score = AccuracyScore(converter=TwoClassThresholdConverter(positive_class=1, threshold=0.5))
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
accuracy = score.calculate(actual_values=mock_data.actual, predicted_values=predictions_mock)
assert isclose(accuracy, accuracy_score(y_true=mock_data.actual, y_pred=mock_data.predictions))
assert isclose(score.value, accuracy_score(y_true=mock_data.actual, y_pred=mock_data.predictions))
def test_KappaScore_multi_class(self):
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_MultiClass_predictions.csv'))) # noqa
score = KappaScore(converter=HighestValueConverter())
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
# noinspection SpellCheckingInspection
score.calculate(actual_values=mock_data.actual,
predicted_values=mock_data[['setosa', 'versicolor', 'virginica']])
assert isclose(score.value, cohen_kappa_score(y1=mock_data.actual, y2=mock_data.predicted_classes))
def test_Accuracy_multi_class(self):
mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_MultiClass_predictions.csv'))) # noqa
score = AccuracyScore(converter=HighestValueConverter())
assert isinstance(score, UtilityFunctionMixin)
assert isinstance(score, ScoreBase)
# noinspection SpellCheckingInspection
score.calculate(actual_values=mock_data.actual,
predicted_values=mock_data[['setosa', 'versicolor', 'virginica']])
assert isclose(score.value, accuracy_score(y_true=mock_data.actual, y_pred=mock_data.predicted_classes)) # noqa
|
py | 1a4bcea4165aa77c450bb5de3fc53ea7f4d18c14 | import logging
import torch
from ..datasets import build_loader
from ..tasks import build_task
from ..utils import get_default_parser, env_setup, \
Timer, get_eta, dist_get_world_size
def add_args(parser):
## Basic options
parser.add_argument('--dataset', type=str, default='CIFAR10',
help='dataset')
parser.add_argument('--data-root', type=str, required=True,
help='root directory of the dataset')
parser.add_argument('--n-epoch', type=int, default=20,
help='# of epochs to train')
parser.add_argument('--batch-size', type=int, default=128,
help='batch size for training (per node)')
parser.add_argument('--n-worker', type=int, default=8,
help='# of workers for data prefetching (per node)')
parser.add_argument('--lr', type=float, default=0.1,
help='base learning rate (default: 0.1)')
# parser.add_argument('--data-root', type=str, default='D:/Data/SmallDB/CIFAR-10',
# help='root directory of the dataset')
# parser.add_argument('--n-epoch', type=int, default=1,
# help='# of epochs to train')
## Hyperparameters
parser.add_argument('--optim', type=str, default='SGD',
help='optimizer (default: SGD)')
parser.add_argument('--wd', type=float, default=5e-4,
help='weight decay (default: 5e-4)')
parser.add_argument('--momentum', type=float, default=0.9,
help='optimizer momentum (default: 0.9)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='enables nesterov momentum')
parser.add_argument('--lr-schedule', type=str, default='Linear',
help='learning rate schedule (default: Linear)')
parser.add_argument('--lr-update-per-epoch', action='store_true', default=False,
help='update learning rate after each epoch instead of each iter by default')
parser.add_argument('--lr-decay-epoch', type=int, default=50,
help='learning rate schedule (default: 50)')
parser.add_argument('--lr-schedule-gamma', type=float, default=0.1,
                        help='interpretation depends on lr_schedule (default: 0.1)')
## Training Settings
parser.add_argument('--reset', action='store_true', default=False,
help='DANGER: purge the exp_dir and start a fresh new training run')
parser.add_argument('--pretrain', type=str, default=None,
help='pretrained weights')
parser.add_argument('--batch-size-per-gpu', type=int, default=None,
help='alternative to batch_size (and overrides it)')
parser.add_argument('--n-worker-per-gpu', type=int, default=None,
help='alternative n_worker (and overrides it)')
parser.add_argument('--epoch-size', type=int, default=float('inf'),
help='maximum # of examples per epoch')
parser.add_argument('--no-val', action='store_false', dest='val', default=True,
help='turn off validation')
parser.add_argument('--log-interval', type=int, default=50,
help='after every how many iters to log the training status')
parser.add_argument('--save-interval', type=int, default=5,
help='after every how many epochs to save the learned model')
    parser.add_argument('--val-interval', type=int, default=5,
                        help='after every how many epochs to run validation')
parser.add_argument('--train-gather', action='store_true', default=False,
help='gather results over batches during training, which is required '
'to compute metrics over the entire training set at the end of '
'every epoch',
)
def main():
## Overall timer
tmr_main = Timer()
## Argument parser and environment setup
parser = get_default_parser('llcv - training script')
add_args(parser)
args = env_setup(parser, 'train', ['data_root', 'pretrain'])
## Prepare the dataloader
train_loader = build_loader(args, is_train=True)
logging.info(f'# of classes: {len(train_loader.dataset.classes)}')
n_train = len(train_loader.dataset)
logging.info(f'# of training examples: {n_train}')
assert n_train
if args.epoch_size < n_train:
        logging.warning(f'Epoch size ({args.epoch_size}) is smaller than the # of training examples')
train_epoch_size = args.epoch_size
else:
train_epoch_size = n_train
if args.val:
val_loader = build_loader(args, is_train=False)
n_val = len(val_loader.dataset)
logging.info(f'# of validation examples: {n_val}')
else:
n_val = 0
## Initialize task
task = build_task(args, train_loader, is_train=True)
if task.resume_epoch >= args.n_epoch:
logging.warning(f'The model is already trained for {task.resume_epoch} epochs')
return
if n_val and task.has_val_score:
if task.resume_epoch:
best_epoch, best_score = task.query_best_model()
else:
best_score = best_epoch = 0
## Start training
last_saved_epoch = 0
n_iter_epoch = 0
n_iter_total = (args.n_epoch - task.resume_epoch)*len(train_loader)
speed_ratio = dist_get_world_size()
logging.info('Training starts')
tmr_train = Timer()
for epoch in range(task.resume_epoch + 1, args.n_epoch + 1):
task.train_mode(args.train_gather)
n_seen = 0
n_warpup = 0
t_warmup = 0
tmr_epoch = Timer()
for i, data in enumerate(train_loader):
i += 1
# the last batch can be smaller than normal
this_batch_size = len(data[0])
tmr_iter = Timer()
task.forward(data)
task.backward()
tmr_iter.stop()
if not args.lr_update_per_epoch:
task.update_lr_iter()
n_seen += this_batch_size
t_iter = tmr_iter.elapsed()
if i <= args.timing_warmup_iter:
n_warpup += this_batch_size
t_warmup += t_iter
if i % args.log_interval == 0:
t_total = tmr_epoch.check()
if i <= args.timing_warmup_iter:
ave_speed = n_seen/t_total if t_total else float('inf')
else:
                    ave_speed = (n_seen - n_warpup) / (t_total - t_warmup) if (t_total - t_warmup) else float('inf')
ave_speed *= speed_ratio
task.log_iter(
'train e%d: %4d/%4d, %5.4gHz' %
(epoch, i, len(train_loader), ave_speed),
', ETA: ' + get_eta(tmr_train.check(), n_iter_epoch + i, n_iter_total),
)
task.log_iter_tb(
(epoch-1)*len(train_loader) + i,
is_train=True,
)
if n_seen >= train_epoch_size:
break
task.dist_gather(is_train=True)
task.log_epoch(f'train e{epoch} summary: ')
task.log_epoch_tb(epoch, is_train=True)
task.reset_epoch()
if n_val and (epoch % args.val_interval == 0 or epoch == args.n_epoch):
n_seen = 0
task.test_mode()
n_warpup = 0
t_warmup = 0
tmr_val = Timer()
for i, data in enumerate(val_loader):
i += 1
this_batch_size = len(data[0])
tmr_iter = Timer()
with torch.no_grad():
task.forward(data)
tmr_iter.stop()
n_seen += this_batch_size
t_iter = tmr_iter.elapsed()
if i <= args.timing_warmup_iter:
n_warpup += this_batch_size
t_warmup += t_iter
if i % args.log_interval == 0:
t_total = tmr_val.check()
if i <= args.timing_warmup_iter:
ave_speed = n_seen/t_total if t_total else float('inf')
else:
                        ave_speed = (n_seen - n_warpup) / (t_total - t_warmup) if (t_total - t_warmup) else float('inf')
ave_speed *= speed_ratio
task.log_iter(
'val e%d: %4d/%4d, %6.5gHz' %
(epoch, i, len(val_loader), ave_speed),
)
task.dist_gather(is_train=False)
if task.has_val_score:
new_score = task.get_test_scores()[0]
if new_score > best_score:
best_score = new_score
best_epoch = epoch
task.mark_best_model(best_epoch, best_score)
task.save(epoch)
last_saved_epoch = epoch
task.log_epoch(f'val e{epoch} summary: ')
task.log_epoch_tb(epoch, is_train=False)
task.reset_epoch()
tmr_epoch.stop()
logging.info('end of epoch %d/%d: epoch time: %s, ETA: %s' %
(epoch, args.n_epoch, tmr_epoch.elapsed(to_str=True),
get_eta(tmr_train.check(), epoch, args.n_epoch))
)
if last_saved_epoch != epoch and epoch % args.save_interval == 0:
task.save(epoch)
last_saved_epoch = epoch
if args.lr_update_per_epoch:
task.update_lr_epoch()
n_iter_epoch += len(train_loader)
if last_saved_epoch != args.n_epoch:
# saving the last epoch if n_epoch is not divisible by save_interval
task.save(args.n_epoch)
tmr_main.stop()
logging.info(f'Training finished with total elapsed time {tmr_main.elapsed(to_str=True)}')
if n_val and task.has_val_score:
logging.info(f'The best model is obtained at epoch {best_epoch} with score {best_score:.6g}')
if __name__ == '__main__':
main()
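# A minimal invocation sketch (hedged: the script filename and the data-root value below are
# assumptions for illustration; only flags defined in add_args() above are used):
#
#   python train.py --dataset CIFAR10 --data-root /path/to/CIFAR10 \
#       --n-epoch 20 --batch-size 128 --lr 0.1 --val-interval 5 --save-interval 5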
|
py | 1a4bced80f0eccd915e10dad3714eff330c0298c | import tensorflow as tf
from tensorflow_addons.layers import InstanceNormalization
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.layers as layers
from layers import *
def build_discriminator(input_shape, k_init):
inp = tf.keras.layers.Input(shape=input_shape)
x = tf.keras.layers.Conv2D(64, (4, 4), kernel_initializer=k_init, strides=2, padding='same')(inp)
x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tf.keras.layers.Conv2D(128, (4, 4), kernel_initializer=k_init, strides=2, padding='same')(x)
x = InstanceNormalization(axis=-1)(x)
x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tf.keras.layers.Conv2D(256, (4, 4), kernel_initializer=k_init, strides=2, padding='same')(x)
x = InstanceNormalization(axis=-1)(x)
x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tf.keras.layers.Conv2D(256, (4, 4), kernel_initializer=k_init, strides=2, padding='same')(x)
x = InstanceNormalization(axis=-1)(x)
x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tf.keras.layers.Conv2D(512, (4, 4), kernel_initializer=k_init, padding='same')(x)
x = InstanceNormalization(axis=-1)(x)
x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
output_x = tf.keras.layers.Conv2D(1, kernel_size=(4, 4), padding='same', kernel_initializer=k_init)(x)
output_x = InstanceNormalization(axis=-1)(output_x)
output_x = tf.keras.layers.LeakyReLU(alpha=0.2)(output_x)
return tf.keras.Model(inputs=inp, outputs=output_x)
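# A minimal usage sketch (hedged: the 256x256x3 input shape and the RandomNormal initializer
# are illustrative assumptions, not values taken from this repo):
if __name__ == '__main__':
    example_k_init = tf.keras.initializers.RandomNormal(stddev=0.02)
    example_disc = build_discriminator(input_shape=(256, 256, 3), k_init=example_k_init)
    # The final 1-channel conv yields a PatchGAN-style map of real/fake scores rather than a single logit.
    example_disc.summary()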
|
py | 1a4bceda0c4e8ff3105872b08d0c21d4b1d47f2e | """collection of methods for generating merger populations and rates"""
import utils
import sfh
from astropy.cosmology import Planck15 as cosmo
from astropy import units as u
import numpy as np
from tqdm import tqdm
def get_mergers(zbins, mets, metallicities, alpha, z_interp, downsample):
met_weights = sfh.get_metallicity_weights(zbins, mets)
mergers_tot = []
for met_read, met, ii in tqdm(zip(metallicities, mets, range(len(metallicities))), total=len(metallicities)):
BBH, mass_stars = utils.get_cosmic_data(alpha=alpha, met_read=met_read)
mergers = []
for zbin_low, zbin_high, jj in zip(zbins[1:], zbins[:-1], range(len(zbins))):
# get the midpoint of the zbin
midz = zbin_low + (zbin_high - zbin_low) / 2
# get the star formation rate from Madau & Fragos (2017)
sfr = sfh.madau_17(midz) * u.Msun * u.yr ** (-1) * u.Mpc ** (-3)
# we want *anything* that merges between the formation and today!
t_delay_min = 0
t_delay_max = cosmo.lookback_time(midz).to(u.Myr).value
BBH_merge = BBH.loc[(BBH.tphys > t_delay_min) & (BBH.tphys < t_delay_max)].copy()
if len(BBH_merge) > 0:
# log the formation and merger times
BBH_merge['t_form'] = cosmo.lookback_time(midz).to(u.Myr).value
BBH_merge['t_merge'] = BBH_merge.t_form - BBH_merge.tphys
# filter just to be safe
BBH_merge = BBH_merge.loc[BBH_merge.t_merge > 1e-3].copy()
# log the merger redshift
BBH_merge['z_merge'] = z_interp(BBH_merge.t_merge)
# log the formation redshift
BBH_merge['z_form'] = np.ones(len(BBH_merge)) * midz
# down sample because we have too much data
BBH_merge = BBH_merge.sample(int(len(BBH_merge) / downsample))
# calculate the number of mergers per unit mass formed
#merger_rate_per_mass = BBH_merge['initial_mass'] / (mass_stars / downsample)
# calculate the total amount of mass formed at redshift bin: midz and metallicity: met
SFR_met_weighted = (sfr * met_weights[ii, jj]).to(u.Msun * u.Gpc ** (-3) * u.yr ** (-1))
# calculate the number of merging BBH formed per comoving volume per source-frame time
BBH_merge['dN_dVdtf_source'] = (SFR_met_weighted * (1/((mass_stars * u.Msun) / downsample))).value
# account for the expansion between the formation time and merger time for each BBH
dt_f_dt_m = (1 + BBH_merge['z_merge']) * cosmo.H(BBH_merge['z_merge']) / \
((1 + BBH_merge['z_form']) * cosmo.H(BBH_merge['z_form']))
                # calculate the number of merging BBHs per source-frame time per comoving volume
BBH_merge['dN_dVdtm_source'] = BBH_merge['dN_dVdtf_source'] * dt_f_dt_m
                # calculate the number of merging BBHs per comoving volume in the detector frame
BBH_merge['dN_dVdtm_det'] = BBH_merge['dN_dVdtm_source'] * 1 / (1 + BBH_merge['z_merge'])
# differential comoving volume at merger redshift
                BBH_merge['dV_dz'] = cosmo.differential_comoving_volume(np.array(BBH_merge['z_merge'].values)).to(
                    u.Gpc ** (3) * u.steradian ** (-1)).value * (4 * np.pi)
                if len(mergers) == 0:
                    mergers = BBH_merge
                else:
                    mergers = mergers.append(BBH_merge)
else:
continue
if len(mergers_tot) == 0:
mergers_tot = mergers
else:
mergers_tot = mergers_tot.append(mergers)
if len(mergers_tot) > 0:
return mergers_tot
else:
return []
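# A minimal sketch of how the z_interp argument could be built (an illustrative assumption;
# the actual pipeline may construct it differently). It inverts lookback time in Myr back to
# redshift, which is how get_mergers uses it to fill 'z_merge'.
def example_build_z_interp(z_max=20.0, n_grid=1000):
    from scipy.interpolate import interp1d
    z_grid = np.linspace(1e-4, z_max, n_grid)
    t_grid = cosmo.lookback_time(z_grid).to(u.Myr).value
    # lookback time increases monotonically with redshift, so it is a valid interpolation axis
    return interp1d(t_grid, z_grid, bounds_error=False, fill_value=(0.0, z_max))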
|
py | 1a4bcf0ed1e012304111f3270a41b809cd3a39f8 | from .mgmt_testcase import AzureMgmtTestCase, AzureMgmtPreparer
from .azure_testcase import AzureTestCase, is_live, get_region_override
from .resource_testcase import (
FakeResource,
ResourceGroupPreparer,
RandomNameResourceGroupPreparer,
CachedResourceGroupPreparer,
)
from .storage_testcase import (
FakeStorageAccount,
StorageAccountPreparer,
BlobAccountPreparer,
CachedStorageAccountPreparer,
)
from .keyvault_preparer import KeyVaultPreparer
from .powershell_preparer import PowerShellPreparer
__all__ = [
"AzureMgmtTestCase",
"AzureMgmtPreparer",
"FakeResource",
"ResourceGroupPreparer",
"StorageAccountPreparer",
"BlobAccountPreparer",
"CachedStorageAccountPreparer",
"FakeStorageAccount",
"AzureTestCase",
"is_live",
"get_region_override",
"KeyVaultPreparer",
"RandomNameResourceGroupPreparer",
"CachedResourceGroupPreparer",
"PowerShellPreparer",
]
|
py | 1a4bcfc45cae8832d1d6881055650852d4cc1f37 | import ctypes
import enum
import numpy as np
from astropy import units as u
from panoptes.pocs.camera.sdk import AbstractSDKDriver
from panoptes.utils import error
from panoptes.utils import get_quantity_value
####################################################################################################
#
# Main ASI Driver class.
#
# The methods of this class call the functions from ASICamera2.h using the ctypes foreign function
# library. Based on v1.13.0930 of the ZWO ASI SDK.
#
####################################################################################################
class ASIDriver(AbstractSDKDriver):
def __init__(self, library_path=None, **kwargs):
"""Main class representing the ZWO ASI library interface.
On construction loads the shared object/dynamically linked version of the ASI SDK library,
which must be already installed (see https://astronomy-imaging-camera.com/software-drivers).
The name and location of the shared library can be manually specified with the library_path
argument, otherwise the ctypes.util.find_library function will be used to try to locate it.
Args:
library_path (str, optional): path to the library e.g. '/usr/local/lib/libASICamera2.so'
Returns:
`~pocs.camera.libasi.ASIDriver`
Raises:
            panoptes.utils.error.NotFound: raised if library_path not given & find_library fails to
                locate the library.
            OSError: raised if the ctypes.CDLL loader cannot load the library.
"""
super().__init__(name='ASICamera2', library_path=library_path, **kwargs)
self._product_ids = self.get_product_ids() # Supported camera models
# Methods
def get_SDK_version(self):
""" Get the version of the ZWO ASI SDK """
# First set return type for function to pointer to null terminated string
self._CDLL.ASIGetSDKVersion.restype = ctypes.c_char_p
version = self._CDLL.ASIGetSDKVersion().decode('ascii') # Get bytes so decode to str
version = version.replace(', ', '.') # Format the version string properly
return version
def get_devices(self):
"""Gets currently connected camera info.
Returns:
dict: All currently connected camera serial numbers with corresponding integer
camera IDs.
Notes:
If a camera does not have a serial number it will attempt to fall back to string ID.
Cameras with neither serial number nor string ID will be left out of the dictionary
            as they have no unique identifier.
"""
n_cameras = self.get_num_of_connected_cameras()
if n_cameras == 0:
raise error.PanError("No ZWO ASI camera devices found")
# Get the IDs
cameras = {}
for camera_index in range(n_cameras):
info = self.get_camera_property(camera_index)
camera_ID = info['camera_ID']
self.open_camera(camera_ID)
try:
serial_number = self.get_serial_number(camera_ID)
except RuntimeError as err:
# If at first you don't succeed, try, except, else, finally again.
self.logger.warning(f"Error getting serial number: {err}")
try:
string_ID = self.get_ID(camera_ID)
except RuntimeError as err:
self.logger.warning(f"Error getting string ID: {err}")
msg = f"Skipping ZWO ASI camera {camera_ID} with no serial number or string ID."
self.logger.error(msg)
break
else:
msg = f"Using string ID '{string_ID}' in place of serial number."
self.logger.warning(msg)
serial_number = string_ID
finally:
self.close_camera(camera_ID)
cameras[serial_number] = camera_ID
self.logger.debug(f"Got camera serial numbers: {list(cameras.keys())}")
return cameras
def get_num_of_connected_cameras(self):
""" Get the count of connected ASI cameras """
count = self._CDLL.ASIGetNumOfConnectedCameras() # Return type is int, needs no Pythonising
self.logger.debug("Found {} connected ASI cameras".format(count))
return count
def get_product_ids(self):
"""Get product IDs of cameras supported by the SDK."""
n_pids = self._CDLL.ASIGetProductIDs(0) # Call once to get number of product IDs
if n_pids > 0:
# Make array of C ints of required size.
product_ids = (ctypes.c_int * n_pids)()
# Call again to get product IDs. Should get same n_pids as before.
assert n_pids == self._CDLL.ASIGetProductIDs(ctypes.byref(product_ids))
else:
self.logger.error("Error getting supported camera product IDs from SDK.")
raise RuntimeError("ZWO SDK support 0 SDK products?")
self.logger.debug("Got {} supported camera product IDs from SDK.".format(n_pids))
return list(product_ids)
def get_camera_property(self, camera_index):
""" Get properties of the camera with given index """
camera_info = CameraInfo()
error_code = self._CDLL.ASIGetCameraProperty(ctypes.byref(camera_info), camera_index)
if error_code != ErrorCode.SUCCESS:
msg = "Error calling ASIGetCameraProperty: {}".format(ErrorCode(error_code).name)
self.logger.error(msg)
raise RuntimeError(msg)
pythonic_info = self._parse_info(camera_info)
self.logger.debug("Got info from camera {camera_ID}, {name}".format(**pythonic_info))
return pythonic_info
def get_camera_property_by_id(self, camera_ID):
"""Get properties of the camera with a given integer ID."""
camera_info = CameraInfo()
self._call_function('ASIGetCameraPropertyByID',
camera_ID,
ctypes.byref(camera_info))
pythonic_info = self._parse_info(camera_info)
self.logger.debug("Got info from camera {camera_ID}, {name}".format(**pythonic_info))
return pythonic_info
def open_camera(self, camera_ID):
""" Open camera with given integer ID """
self._call_function('ASIOpenCamera', camera_ID)
self.logger.debug("Opened camera {}".format(camera_ID))
def init_camera(self, camera_ID):
""" Initialise camera with given integer ID """
self._call_function('ASIInitCamera', camera_ID)
self.logger.debug("Initialised camera {}".format(camera_ID))
def close_camera(self, camera_ID):
""" Close camera with given integer ID """
self._call_function('ASICloseCamera', camera_ID)
self.logger.debug("Closed camera {}".format(camera_ID))
def get_ID(self, camera_ID):
"""Get string ID from firmaware for the camera with given integer ID
The saved ID is an array of 8 unsigned chars for some reason.
"""
struct_ID = ID()
self._call_function('ASIGetID', camera_ID, ctypes.byref(struct_ID))
bytes_ID = bytes(struct_ID.id)
string_ID = bytes_ID.decode()
self.logger.debug("Got string ID '{}' from camera {}".format(string_ID, camera_ID))
return string_ID
def set_ID(self, camera_ID, string_ID):
"""Save string ID to firmware of camera with given integer ID
The saved ID is an array of 8 unsigned chars for some reason. To preserve some sanity
this method takes an 8 byte UTF-8 string as input.
"""
bytes_ID = string_ID.encode() # Convert string to bytes
if len(bytes_ID) > 8:
bytes_ID = bytes_ID[:8] # This may chop out part of a UTF-8 multibyte character
self.logger.warning("New ID longer than 8 bytes, truncating {} to {}".format(
string_ID, bytes_ID.decode()))
else:
bytes_ID = bytes_ID.ljust(8) # Pad to 8 bytes with spaces, if necessary
uchar_ID = (ctypes.c_ubyte * 8).from_buffer_copy(bytes_ID)
self._call_function('ASISetID', camera_ID, ID(uchar_ID))
self.logger.debug("Set camera {} string ID to '{}'".format(camera_ID, bytes_ID.decode()))
def get_num_of_controls(self, camera_ID):
""" Gets the number of control types supported by the camera with given integer ID """
n_controls = ctypes.c_int()
self._call_function('ASIGetNumOfControls', camera_ID, ctypes.byref(n_controls))
n_controls = n_controls.value # Convert from ctypes c_int type to Python int
self.logger.debug("Camera {} has {} controls".format(camera_ID, n_controls))
return n_controls
def get_control_caps(self, camera_ID):
""" Gets the details of all the controls supported by the camera with given integer ID """
n_controls = self.get_num_of_controls(camera_ID) # First get number of controls
controls = {}
for i in range(n_controls):
control_caps = ControlCaps()
self._call_function('ASIGetControlCaps',
camera_ID,
ctypes.c_int(i),
ctypes.byref(control_caps))
control = self._parse_caps(control_caps)
controls[control['control_type']] = control
self.logger.debug("Got details of {} controls from camera {}".format(n_controls, camera_ID))
return controls
def get_control_value(self, camera_ID, control_type):
""" Gets the value of the control control_type from camera with given integer ID """
value = ctypes.c_long()
is_auto = ctypes.c_int()
self._call_function('ASIGetControlValue',
camera_ID,
ControlType[control_type],
ctypes.byref(value),
ctypes.byref(is_auto))
nice_value = self._parse_return_value(value, control_type)
return nice_value, bool(is_auto)
def set_control_value(self, camera_ID, control_type, value):
""" Sets the value of the control control_type on camera with given integet ID """
if value == 'AUTO':
# Apparently need to pass current value when turning auto on
auto = True
value = self.get_control_value(camera_ID, control_type)[0]
else:
auto = False
self._call_function('ASISetControlValue',
camera_ID,
ctypes.c_int(ControlType[control_type]),
self._parse_input_value(value, control_type),
ctypes.c_int(auto))
self.logger.debug("Set {} to {} on camera {}".format(control_type,
'AUTO' if auto else value,
camera_ID))
def get_roi_format(self, camera_ID):
""" Get the ROI size and image format setting for camera with given integer ID """
width = ctypes.c_int()
height = ctypes.c_int()
binning = ctypes.c_int()
image_type = ctypes.c_int()
self._call_function('ASIGetROIFormat',
camera_ID,
ctypes.byref(width),
ctypes.byref(height),
ctypes.byref(binning),
ctypes.byref(image_type))
roi_format = {'width': width.value * u.pixel,
'height': height.value * u.pixel,
'binning': binning.value,
'image_type': ImgType(image_type.value).name}
return roi_format
def set_roi_format(self, camera_ID, width, height, binning, image_type):
""" Set the ROI size and image format settings for the camera with given integer ID """
width = int(get_quantity_value(width, unit=u.pixel))
height = int(get_quantity_value(height, unit=u.pixel))
binning = int(binning)
self._call_function('ASISetROIFormat',
camera_ID,
ctypes.c_int(width),
ctypes.c_int(height),
ctypes.c_int(binning),
ctypes.c_int(ImgType[image_type]))
self.logger.debug("Set ROI, format on camera {} to {}x{}/{}, {}".format(
camera_ID, width, height, binning, image_type))
def get_start_position(self, camera_ID):
""" Get position of the upper left corner of the ROI for camera with given integer ID
Args:
camera_ID (int): integer ID of the camera
Returns:
(astropy.units.Quantity, astropy.units.Quantity): x, y coordinates of the upper left
corner of the ROI. Note, these are in binned pixels.
"""
start_x = ctypes.c_int()
start_y = ctypes.c_int()
self._call_function('ASIGetStartPos',
camera_ID,
ctypes.byref(start_x),
ctypes.byref(start_y))
start_x = start_x.value * u.pixel
start_y = start_y.value * u.pixel
return start_x, start_y
def set_start_position(self, camera_ID, start_x, start_y):
""" Set position of the upper left corner of the ROI for camera with given integer ID """
start_x = int(get_quantity_value(start_x, unit=u.pixel))
start_y = int(get_quantity_value(start_y, unit=u.pixel))
self._call_function('ASISetStartPos',
camera_ID,
ctypes.c_int(start_x),
ctypes.c_int(start_y))
self.logger.debug("Set ROI start position of camera {} to ({}, {})".format(
camera_ID, start_x, start_y))
def get_dropped_frames(self, camera_ID):
"""Get the number of dropped frames during video capture."""
n_dropped_frames = ctypes.c_int()
self._call_function('ASIGetDroppedFrames',
camera_ID,
ctypes.byref(n_dropped_frames))
        n_dropped_frames = n_dropped_frames.value
        self.logger.debug("Camera {} has dropped {} frames.".format(camera_ID, n_dropped_frames))
        return n_dropped_frames
def enable_dark_subtract(self, camera_ID, dark_file_path):
"""Enable dark subtraction (not implemented).
You almost certainly wouldn't want to use this as it only works with images taken in
RGB8 format and only with dark frames saved as .BMP files. Far better to do dark
subtraction in post-processing.
"""
raise NotImplementedError
def disable_dark_subtract(self, camera_ID):
"""Disable dark subtraction.
May need to call this as dark current subtraction settings persist in the registry
on Windows.
"""
self._call_function('ASIDisableDarkSubtract',
camera_ID)
self.logger.debug("Dark subtraction on camera {} disabled.".format(camera_ID))
def pulse_guide_on(self, camera_ID, direction):
"""Turn on PulseGuide on ST4 port of given camera in given direction."""
self._call_function('ASIPulseGuideOn',
camera_ID,
GuideDirection[direction])
dname = GuideDirection[direction].name
msg = f"PulseGuide on camera {camera_ID} on in direction {dname}."
self.logger.debug(msg)
def pulse_guide_off(self, camera_ID, direction):
"""Turn off PulseGuide on ST4 port of given camera in given direction."""
self._call_function('ASIPulseGuideOff',
camera_ID,
GuideDirection[direction])
dname = GuideDirection[direction].name
msg = f"PulseGuide on camera {camera_ID} off in direction {dname}."
self.logger.debug(msg)
def get_gain_offset(self, camera_ID):
"""Get pre-setting parameters."""
offset_highest_dr = ctypes.c_int()
offset_unity_gain = ctypes.c_int()
gain_lowest_rn = ctypes.c_int()
offset_lowest_rn = ctypes.c_int()
self._call_function('ASIGetGainOffset',
camera_ID,
ctypes.byref(offset_highest_dr),
ctypes.byref(offset_unity_gain),
ctypes.byref(gain_lowest_rn),
ctypes.byref(offset_lowest_rn))
self.logger.debug('Got pre-setting parameters from camera {}.'.format(camera_ID))
return offset_highest_dr, offset_unity_gain, gain_lowest_rn, offset_lowest_rn
def get_camera_supported_mode(self, camera_ID):
"""Get supported trigger modes for camera with given integer ID."""
modes_struct = SupportedMode()
self._call_function('ASIGetCameraSupportMode',
camera_ID,
ctypes.byref(modes_struct.modes))
supported_modes = []
for mode_int in modes_struct.modes:
if mode_int == CameraMode.END:
break
supported_modes.append(CameraMode(mode_int).name)
self.logger.debug("Got supported modes {} for camera {}".format(supported_modes,
camera_ID))
return supported_modes
def get_camera_mode(self, camera_ID):
"""Get current trigger mode for camera with given integer ID."""
        mode = ctypes.c_int()
self._call_function('ASIGetCameraMode',
camera_ID,
ctypes.byref(mode))
        mode_name = CameraMode(mode.value).name
self.logger.debug('Camera {} is in trigger mode {}'.format(camera_ID, mode_name))
return mode_name
def set_camera_mode(self, camera_ID, mode_name):
"""Set trigger mode for camera with given integer ID."""
mode = CameraMode[mode_name]
self._call_function('ASISetCameraMode',
camera_ID,
mode)
self.logger.debug('Set trigger mode of camera {} to {}.'.format(camera_ID, mode_name))
def send_soft_trigger(self, camera_ID, start_stop_signal):
"""Send out a soft trigger on camera with given integer ID."""
self._call_function('ASISendSoftTrigger',
camera_ID,
int(bool(start_stop_signal)))
self.logger.debug('Soft trigger sent to camera {}.'.format(camera_ID))
def get_serial_number(self, camera_ID):
"""Get serial number of the camera with given integer ID.
The serial number is an array of 8 unsigned chars, the same as string ID,
but it is interpreted differently. It is displayed in ASICAP as a 16 digit
hexadecimal number, so we will convert it the same 16 character string
representation.
"""
struct_SN = ID() # Same structure as string ID.
self._call_function('ASIGetSerialNumber',
camera_ID,
ctypes.byref(struct_SN))
bytes_SN = bytes(struct_SN.id)
serial_number = "".join(f"{b:02x}" for b in bytes_SN)
self.logger.debug("Got serial number '{}' from camera {}".format(serial_number, camera_ID))
return serial_number
def get_trigger_output_io_conf(self, camera_ID):
"""Get external trigger configuration of the camera with given integer ID."""
pin = ctypes.c_int()
pin_high = ctypes.c_int()
delay = ctypes.c_long()
duration = ctypes.c_long()
self._call_function('ASIGetTriggerOutputIOConf',
camera_ID,
ctypes.byref(pin),
                            ctypes.byref(pin_high),
ctypes.byref(delay),
ctypes.byref(duration))
self.logger.debug("Got trigger config from camera {}".format(camera_ID))
        return TrigOutput(pin.value).name, bool(pin_high.value), delay.value, duration.value
def set_trigger_ouput_io_conf(self, camera_ID, pin, pin_high, delay, duration):
"""Set external trigger configuration of the camera with given integer ID."""
self._call_function('ASISetTriggerOutputIOConf',
camera_ID,
TrigOutput[pin],
ctypes.c_int(pin_high),
ctypes.c_long(delay),
ctypes.c_long(duration))
self.logger.debug("Set trigger config of camera {}".format(camera_ID))
def start_exposure(self, camera_ID):
""" Start exposure on the camera with given integer ID """
self._call_function('ASIStartExposure', camera_ID)
self.logger.debug("Exposure started on camera {}".format(camera_ID))
def stop_exposure(self, camera_ID):
""" Cancel current exposure on camera with given integer ID """
self._call_function('ASIStopExposure', camera_ID)
self.logger.debug("Exposure on camera {} cancelled".format(camera_ID))
def get_exposure_status(self, camera_ID):
""" Get status of current exposure on camera with given integer ID """
status = ctypes.c_int()
self._call_function('ASIGetExpStatus', camera_ID, ctypes.byref(status))
return ExposureStatus(status.value).name
def get_exposure_data(self, camera_ID, width, height, image_type):
""" Get image data from exposure on camera with given integer ID """
exposure_data = self._image_array(width, height, image_type)
self._call_function('ASIGetDataAfterExp',
camera_ID,
exposure_data.ctypes.data_as(ctypes.POINTER(ctypes.c_byte)),
ctypes.c_long(exposure_data.nbytes))
self.logger.debug("Got exposure data from camera {}".format(camera_ID))
return exposure_data
def start_video_capture(self, camera_ID):
""" Start video capture mode on camera with given integer ID """
self._call_function('ASIStartVideoCapture', camera_ID)
def stop_video_capture(self, camera_ID):
""" Stop video capture mode on camera with given integer ID """
self._call_function('ASIStopVideoCapture', camera_ID)
def get_video_data(self, camera_ID, width, height, image_type, timeout):
""" Get the image data from the next available video frame """
video_data = self._image_array(width, height, image_type)
timeout = int(get_quantity_value(timeout, unit=u.ms))
try:
self._call_function('ASIGetVideoData',
camera_ID,
video_data.ctypes.data_as(ctypes.POINTER(ctypes.c_byte)),
ctypes.c_long(video_data.nbytes),
ctypes.c_int(-1))
# If set timeout to anything but -1 (no timeout) this call times out instantly?
except RuntimeError:
# Expect some dropped frames during video capture
return None
else:
return video_data
# Private methods
def _call_function(self, function_name, camera_ID, *args):
""" Utility function for calling the SDK functions that return ErrorCode """
function = getattr(self._CDLL, function_name)
error_code = function(ctypes.c_int(camera_ID), *args)
if error_code != ErrorCode.SUCCESS:
msg = "Error calling {}: {}".format(function_name, ErrorCode(error_code).name)
self.logger.error(msg)
raise RuntimeError(msg)
def _parse_info(self, camera_info):
""" Utility function to parse CameraInfo Structures into something more Pythonic """
pythonic_info = {'name': camera_info.name.decode(),
'camera_ID': int(camera_info.camera_ID),
'max_height': camera_info.max_height * u.pixel,
'max_width': camera_info.max_width * u.pixel,
'is_color_camera': bool(camera_info.is_color_camera),
'bayer_pattern': BayerPattern(camera_info.bayer_pattern).name,
'supported_bins': self._parse_bins(camera_info.supported_bins),
'supported_video_format': self._parse_formats(
camera_info.supported_video_format),
'pixel_size': camera_info.pixel_size * u.um,
'has_mechanical_shutter': bool(camera_info.has_mechanical_shutter),
'has_ST4_port': bool(camera_info.has_ST4_port),
'has_cooler': bool(camera_info.has_cooler),
'is_USB3_host': bool(camera_info.is_USB3_host),
'is_USB3_camera': bool(camera_info.is_USB3_camera),
'e_per_adu': camera_info.e_per_adu * u.electron / u.adu,
'bit_depth': camera_info.bit_depth * u.bit,
'is_trigger_camera': bool(camera_info.is_trigger_camera)}
return pythonic_info
def _parse_bins(self, supported_bins):
bins = tuple(int(b) for b in supported_bins if b != 0)
return bins
def _parse_formats(self, supported_formats):
formats = []
for supported_format in supported_formats:
format = ImgType(supported_format)
if format != ImgType.END:
formats.append(format.name)
else:
break
return tuple(formats)
def _parse_caps(self, control_caps):
""" Utility function to parse ControlCaps Structures into something more Pythonic """
control_type = ControlType(control_caps.control_type).name
control_info = {'name': control_caps.name.decode(),
'description': control_caps.description.decode(),
'max_value': self._parse_return_value(control_caps.max_value,
control_type),
'min_value': self._parse_return_value(control_caps.min_value,
control_type),
'default_value': self._parse_return_value(control_caps.default_value,
control_type),
'is_auto_supported': bool(control_caps.is_auto_supported),
'is_writable': bool(control_caps.is_writable),
'control_type': control_type}
return control_info
def _parse_return_value(self, value, control_type):
""" Helper function to apply appropiate type conversion and/or units to value """
try:
int_value = value.value # If not done already extract Python int from ctypes.c_long
except AttributeError:
int_value = value # If from a ctypes struct value will already be a Python int
# Apply control type specific units and/or data types
if control_type in units_and_scale:
nice_value = int_value * units_and_scale[control_type]
elif control_type in boolean_controls:
nice_value = bool(int_value)
elif control_type == 'FLIP':
nice_value = FlipStatus(int_value).name
else:
nice_value = int_value
return nice_value
def _parse_input_value(self, value, control_type):
""" Helper function to convert input values to appropriate ctypes.c_long """
if control_type in units_and_scale:
value = get_quantity_value(value, unit=units_and_scale[control_type])
elif control_type == 'FLIP':
value = FlipStatus[value]
return ctypes.c_long(int(value))
def _image_array(self, width, height, image_type):
""" Creates a suitable numpy array for storing image data """
width = int(get_quantity_value(width, unit=u.pixel))
height = int(get_quantity_value(height, unit=u.pixel))
if image_type in ('RAW8', 'Y8'):
image_array = np.zeros((height, width), dtype=np.uint8, order='C')
elif image_type == 'RAW16':
image_array = np.zeros((height, width), dtype=np.uint16, order='C')
elif image_type == 'RGB24':
image_array = np.zeros((3, height, width), dtype=np.uint8, order='C')
return image_array
units_and_scale = {'AUTO_TARGET_BRIGHTNESS': u.adu,
'AUTO_MAX_EXP': 1e-6 * u.second, # Unit is microseconds
'BANDWIDTHOVERLOAD': u.percent,
'COOLER_POWER_PERC': u.percent,
'EXPOSURE': 1e-6 * u.second, # Unit is microseconds
'OFFSET': u.adu,
'TARGET_TEMP': u.Celsius,
'TEMPERATURE': 0.1 * u.Celsius} # Unit is 1/10th degree C
boolean_controls = ('ANTI_DEW_HEATER',
'COOLER_ON',
'FAN_ON',
'HARDWARE_BIN',
'HIGH_SPEED_MODE',
'MONO_BIN',
'PATTERN_ADJUST')
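# Illustrative example of the conversions above (values are made up, not read from hardware): a raw
# TEMPERATURE reading of 251 becomes 251 * 0.1 * u.Celsius = 25.1 deg C in _parse_return_value,
# while boolean controls such as COOLER_ON are returned as plain Python bools.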
####################################################################################################
#
# The C defines, enums and structs from ASICamera2.h translated to Python constants, enums and
# ctypes.Structures. Based on v1.13.0930 of the ZWO ASI SDK.
#
####################################################################################################
ID_MAX = 128 # Maximum value for camera integer ID (camera_ID)
@enum.unique
class BayerPattern(enum.IntEnum):
""" Bayer filter type """
RG = 0
BG = enum.auto()
GR = enum.auto()
GB = enum.auto()
@enum.unique
class ImgType(enum.IntEnum):
""" Supported video format """
RAW8 = 0
RGB24 = enum.auto()
RAW16 = enum.auto()
Y8 = enum.auto()
END = -1
@enum.unique
class GuideDirection(enum.IntEnum):
""" Guider direction """
NORTH = 0
SOUTH = enum.auto()
EAST = enum.auto()
WEST = enum.auto()
@enum.unique
class FlipStatus(enum.IntEnum):
""" Flip status """
NONE = 0
HORIZ = enum.auto()
VERT = enum.auto()
BOTH = enum.auto()
@enum.unique
class CameraMode(enum.IntEnum):
""" Camera status """
NORMAL = 0
TRIG_SOFT_EDGE = enum.auto()
TRIG_RISE_EDGE = enum.auto()
TRIG_FALL_EDGE = enum.auto()
TRIG_SOFT_LEVEL = enum.auto()
TRIG_HIGH_LEVEL = enum.auto()
TRIG_LOW_LEVEL = enum.auto()
END = -1
@enum.unique
class TrigOutput(enum.IntEnum):
"""External trigger output."""
PINA = 0 # Only Pin A output
    PINB = enum.auto()  # Only Pin B output
NONE = -1
@enum.unique
class ErrorCode(enum.IntEnum):
""" Error codes """
SUCCESS = 0
INVALID_INDEX = enum.auto() # No camera connected or index value out of boundary
INVALID_ID = enum.auto()
INVALID_CONTROL_TYPE = enum.auto()
CAMERA_CLOSED = enum.auto() # Camera didn't open
    CAMERA_REMOVED = enum.auto()  # Failed to find the camera, maybe it was removed
INVALID_PATH = enum.auto() # Cannot find the path of the file
INVALID_FILEFORMAT = enum.auto()
INVALID_SIZE = enum.auto() # Wrong video format size
INVALID_IMGTYPE = enum.auto() # Unsupported image format
OUTOF_BOUNDARY = enum.auto() # The startpos is out of boundary
TIMEOUT = enum.auto()
INVALID_SEQUENCE = enum.auto() # Stop capture first
BUFFER_TOO_SMALL = enum.auto()
VIDEO_MODE_ACTIVE = enum.auto()
EXPOSURE_IN_PROGRESS = enum.auto()
GENERAL_ERROR = enum.auto() # General error, e.g. value is out of valid range
INVALID_MODE = enum.auto() # The current mode is wrong
END = enum.auto()
class CameraInfo(ctypes.Structure):
""" Camera info structure """
_fields_ = [('name', ctypes.c_char * 64),
('camera_ID', ctypes.c_int),
('max_height', ctypes.c_long),
('max_width', ctypes.c_long),
('is_color_camera', ctypes.c_int),
('bayer_pattern', ctypes.c_int),
('supported_bins', ctypes.c_int * 16), # e.g. (1,2,4,8,0,...) means 1x, 2x, 4x, 8x
('supported_video_format', ctypes.c_int * 8), # ImgTypes, terminates with END
('pixel_size', ctypes.c_double), # in microns
('has_mechanical_shutter', ctypes.c_int),
('has_ST4_port', ctypes.c_int),
('has_cooler', ctypes.c_int),
('is_USB3_host', ctypes.c_int),
('is_USB3_camera', ctypes.c_int),
('e_per_adu', ctypes.c_float),
('bit_depth', ctypes.c_int),
('is_trigger_camera', ctypes.c_int),
('unused', ctypes.c_char * 16)]
class ControlType(enum.IntEnum):
""" Control types """
GAIN = 0
EXPOSURE = enum.auto()
GAMMA = enum.auto()
WB_R = enum.auto()
WB_B = enum.auto()
OFFSET = enum.auto()
BANDWIDTHOVERLOAD = enum.auto()
OVERCLOCK = enum.auto()
TEMPERATURE = enum.auto() # Returns temperature*10
FLIP = enum.auto()
AUTO_MAX_GAIN = enum.auto()
AUTO_MAX_EXP = enum.auto() # in microseconds
AUTO_TARGET_BRIGHTNESS = enum.auto()
HARDWARE_BIN = enum.auto()
HIGH_SPEED_MODE = enum.auto()
COOLER_POWER_PERC = enum.auto()
TARGET_TEMP = enum.auto() # NOT *10
COOLER_ON = enum.auto()
MONO_BIN = enum.auto() # Leads to less grid at software bin mode for colour camera
FAN_ON = enum.auto()
PATTERN_ADJUST = enum.auto()
ANTI_DEW_HEATER = enum.auto()
BRIGHTNESS = OFFSET
AUTO_MAX_BRIGHTNESS = AUTO_TARGET_BRIGHTNESS
class ControlCaps(ctypes.Structure):
""" Structure for caps (limits) on allowable parameter values for each camera control """
_fields_ = [('name', ctypes.c_char * 64), # The name of the control, .e.g. Exposure, Gain
('description', ctypes.c_char * 128), # Description of the command
('max_value', ctypes.c_long),
('min_value', ctypes.c_long),
('default_value', ctypes.c_long),
('is_auto_supported', ctypes.c_int),
('is_writable', ctypes.c_int), # Some can be read only, e.g. temperature
('control_type', ctypes.c_int), # ControlType used to get/set value
('unused', ctypes.c_char * 32)]
class ExposureStatus(enum.IntEnum):
""" Exposure status codes """
IDLE = 0
WORKING = enum.auto()
SUCCESS = enum.auto()
FAILED = enum.auto()
class ID(ctypes.Structure):
_fields_ = [('id', ctypes.c_ubyte * 8)]
class SupportedMode(ctypes.Structure):
""" Array of supported CameraModes, terminated with CameraMode.END """
_fields_ = [('modes', ctypes.c_int * 16)]
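# A minimal end-to-end usage sketch (hedged: the library path is an assumption, a real run needs
# a connected ZWO camera, and a production script would poll with a sleep; only methods defined
# on ASIDriver above are used):
if __name__ == '__main__':
    driver = ASIDriver(library_path='/usr/local/lib/libASICamera2.so')
    cameras = driver.get_devices()  # {serial_number: camera_ID}
    camera_ID = next(iter(cameras.values()))
    driver.open_camera(camera_ID)
    driver.init_camera(camera_ID)
    driver.set_roi_format(camera_ID, width=1280, height=960, binning=1, image_type='RAW16')
    driver.set_control_value(camera_ID, 'EXPOSURE', 0.1 * u.second)
    driver.start_exposure(camera_ID)
    while driver.get_exposure_status(camera_ID) == 'WORKING':
        pass  # busy-wait for brevity only
    image = driver.get_exposure_data(camera_ID, width=1280, height=960, image_type='RAW16')
    driver.close_camera(camera_ID)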
|
py | 1a4bd2344782a8888d1f3d2a1dd7c730a8aec2b7 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
# Copyright: (c) 2017, Dag Wieers <[email protected]>
# Copyright: (c) 2017, Jacob McGill (@jmcgill298)
# Copyright: (c) 2017, Swetha Chunduri (@schunduri)
# Copyright: (c) 2019, Rob Huelga (@RobW3LGA)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import json
import os
from copy import deepcopy
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes, to_native
# Optional, only used for APIC signature-based authentication
try:
from OpenSSL.crypto import FILETYPE_PEM, load_privatekey, sign
HAS_OPENSSL = True
except ImportError:
HAS_OPENSSL = False
# Optional, only used for XML payload
try:
import lxml.etree
HAS_LXML_ETREE = True
except ImportError:
HAS_LXML_ETREE = False
# Optional, only used for XML payload
try:
from xmljson import cobra
HAS_XMLJSON_COBRA = True
except ImportError:
HAS_XMLJSON_COBRA = False
def aci_argument_spec():
return dict(
host=dict(type='str', required=True, aliases=['hostname']),
port=dict(type='int', required=False),
username=dict(type='str', default='admin', aliases=['user']),
password=dict(type='str', no_log=True),
private_key=dict(type='str', aliases=['cert_key'], no_log=True), # Beware, this is not the same as client_key !
certificate_name=dict(type='str', aliases=['cert_name']), # Beware, this is not the same as client_cert !
output_level=dict(type='str', default='normal', choices=['debug', 'info', 'normal']),
timeout=dict(type='int', default=30),
use_proxy=dict(type='bool', default=True),
use_ssl=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
)
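# A minimal usage sketch (hypothetical module code, not part of this shared utils file; the
# 'tenant' option is an invented example, while 'state' is the parameter define_method() expects):
#
#   from ansible.module_utils.basic import AnsibleModule
#
#   argument_spec = aci_argument_spec()
#   argument_spec.update(
#       tenant=dict(type='str', aliases=['tenant_name']),
#       state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
#   )
#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
#   aci = ACIModule(module)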
class ACIModule(object):
def __init__(self, module):
self.module = module
self.params = module.params
self.result = dict(changed=False)
self.headers = dict()
self.child_classes = set()
# error output
self.error = dict(code=None, text=None)
# normal output
self.existing = None
# info output
self.config = dict()
self.original = None
self.proposed = dict()
# debug output
self.filter_string = ''
self.method = None
self.path = None
self.response = None
self.status = None
self.url = None
# aci_rest output
self.imdata = None
self.totalCount = None
# Ensure protocol is set
self.define_protocol()
if self.module._debug:
self.module.warn('Enable debug output because ANSIBLE_DEBUG was set.')
self.params['output_level'] = 'debug'
if self.params['private_key']:
# Perform signature-based authentication, no need to log on separately
if not HAS_OPENSSL:
self.module.fail_json(msg='Cannot use signature-based authentication because pyopenssl is not available')
elif self.params['password'] is not None:
self.module.warn("When doing ACI signatured-based authentication, providing parameter 'password' is not required")
elif self.params['password']:
# Perform password-based authentication, log on using password
self.login()
else:
self.module.fail_json(msg="Either parameter 'password' or 'private_key' is required for authentication")
def boolean(self, value, true='yes', false='no'):
''' Return an acceptable value back '''
# When we expect value is of type=bool
if value is None:
return None
elif value is True:
return true
elif value is False:
return false
# If all else fails, escalate back to user
self.module.fail_json(msg="Boolean value '%s' is an invalid ACI boolean value.")
def iso8601_format(self, dt):
''' Return an ACI-compatible ISO8601 formatted time: 2123-12-12T00:00:00.000+00:00 '''
try:
return dt.isoformat(timespec='milliseconds')
except Exception:
tz = dt.strftime('%z')
return '%s.%03d%s:%s' % (dt.strftime('%Y-%m-%dT%H:%M:%S'), dt.microsecond / 1000, tz[:3], tz[3:])
def define_protocol(self):
''' Set protocol based on use_ssl parameter '''
# Set protocol for further use
self.params['protocol'] = 'https' if self.params.get('use_ssl', True) else 'http'
def define_method(self):
''' Set method based on state parameter '''
# Set method for further use
state_map = dict(absent='delete', present='post', query='get')
self.params['method'] = state_map[self.params['state']]
def login(self):
''' Log in to APIC '''
# Perform login request
if 'port' in self.params and self.params['port'] is not None:
url = '%(protocol)s://%(host)s:%(port)s/api/aaaLogin.json' % self.params
else:
url = '%(protocol)s://%(host)s/api/aaaLogin.json' % self.params
payload = {'aaaUser': {'attributes': {'name': self.params['username'], 'pwd': self.params['password']}}}
resp, auth = fetch_url(self.module, url,
data=json.dumps(payload),
method='POST',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
# Handle APIC response
if auth['status'] != 200:
self.response = auth['msg']
self.status = auth['status']
try:
# APIC error
self.response_json(auth['body'])
self.fail_json(msg='Authentication failed: %(code)s %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % auth)
# Retain cookie for later use
self.headers['Cookie'] = resp.headers['Set-Cookie']
def cert_auth(self, path=None, payload='', method=None):
''' Perform APIC signature-based authentication, not the expected SSL client certificate authentication. '''
if method is None:
method = self.params['method'].upper()
# NOTE: ACI documentation incorrectly uses complete URL
if path is None:
path = self.path
path = '/' + path.lstrip('/')
if payload is None:
payload = ''
# Check if we got a private key. This allows the use of vaulting the private key.
if self.params['private_key'].startswith('-----BEGIN PRIVATE KEY-----'):
try:
sig_key = load_privatekey(FILETYPE_PEM, self.params['private_key'])
except Exception:
self.module.fail_json(msg="Cannot load provided 'private_key' parameter.")
# Use the username as the certificate_name value
if self.params['certificate_name'] is None:
self.params['certificate_name'] = self.params['username']
elif self.params['private_key'].startswith('-----BEGIN CERTIFICATE-----'):
self.module.fail_json(msg="Provided 'private_key' parameter value appears to be a certificate. Please correct.")
else:
# If we got a private key file, read from this file.
# NOTE: Avoid exposing any other credential as a filename in output...
if not os.path.exists(self.params['private_key']):
self.module.fail_json(msg="The provided private key file does not appear to exist. Is it a filename?")
try:
with open(self.params['private_key'], 'r') as fh:
private_key_content = fh.read()
except Exception:
self.module.fail_json(msg="Cannot open private key file '%s'." % self.params['private_key'])
if private_key_content.startswith('-----BEGIN PRIVATE KEY-----'):
try:
sig_key = load_privatekey(FILETYPE_PEM, private_key_content)
except Exception:
self.module.fail_json(msg="Cannot load private key file '%s'." % self.params['private_key'])
# Use the private key basename (without extension) as certificate_name
if self.params['certificate_name'] is None:
self.params['certificate_name'] = os.path.basename(os.path.splitext(self.params['private_key'])[0])
elif private_key_content.startswith('-----BEGIN CERTIFICATE-----'):
self.module.fail_json(msg="Provided private key file %s appears to be a certificate. Please correct." % self.params['private_key'])
else:
self.module.fail_json(msg="Provided private key file '%s' does not appear to be a private key. Please correct." % self.params['private_key'])
# NOTE: ACI documentation incorrectly adds a space between method and path
sig_request = method + path + payload
sig_signature = base64.b64encode(sign(sig_key, sig_request, 'sha256'))
sig_dn = 'uni/userext/user-%s/usercert-%s' % (self.params['username'], self.params['certificate_name'])
self.headers['Cookie'] = 'APIC-Certificate-Algorithm=v1.0; ' +\
'APIC-Certificate-DN=%s; ' % sig_dn +\
'APIC-Certificate-Fingerprint=fingerprint; ' +\
'APIC-Request-Signature=%s' % to_native(sig_signature)
def response_json(self, rawoutput):
''' Handle APIC JSON response output '''
try:
jsondata = json.loads(rawoutput)
except Exception as e:
# Expose RAW output for troubleshooting
self.error = dict(code=-1, text="Unable to parse output as JSON, see 'raw' output. %s" % e)
self.result['raw'] = rawoutput
return
# Extract JSON API output
try:
self.imdata = jsondata['imdata']
except KeyError:
self.imdata = dict()
self.totalCount = int(jsondata['totalCount'])
# Handle possible APIC error information
self.response_error()
def response_xml(self, rawoutput):
''' Handle APIC XML response output '''
# NOTE: The XML-to-JSON conversion is using the "Cobra" convention
try:
xml = lxml.etree.fromstring(to_bytes(rawoutput))
xmldata = cobra.data(xml)
except Exception as e:
# Expose RAW output for troubleshooting
self.error = dict(code=-1, text="Unable to parse output as XML, see 'raw' output. %s" % e)
self.result['raw'] = rawoutput
return
# Reformat as ACI does for JSON API output
try:
self.imdata = xmldata['imdata']['children']
except KeyError:
self.imdata = dict()
self.totalCount = int(xmldata['imdata']['attributes']['totalCount'])
# Handle possible APIC error information
self.response_error()
def response_error(self):
''' Set error information when found '''
# Handle possible APIC error information
        if self.totalCount != 0:
try:
self.error = self.imdata[0]['error']['attributes']
except (KeyError, IndexError):
pass
def request(self, path, payload=None):
''' Perform a REST request '''
# Ensure method is set (only do this once)
self.define_method()
self.path = path
if 'port' in self.params and self.params['port'] is not None:
self.url = '%(protocol)s://%(host)s:%(port)s/' % self.params + path.lstrip('/')
else:
self.url = '%(protocol)s://%(host)s/' % self.params + path.lstrip('/')
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(path=path, payload=payload)
# Perform request
resp, info = fetch_url(self.module, self.url,
data=payload,
headers=self.headers,
method=self.params['method'].upper(),
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
# Handle APIC response
if info['status'] != 200:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
self.response_json(resp.read())
def query(self, path):
''' Perform a query with no payload '''
self.path = path
if 'port' in self.params and self.params['port'] is not None:
self.url = '%(protocol)s://%(host)s:%(port)s/' % self.params + path.lstrip('/')
else:
self.url = '%(protocol)s://%(host)s/' % self.params + path.lstrip('/')
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(path=path, method='GET')
# Perform request
resp, query = fetch_url(self.module, self.url,
data=None,
headers=self.headers,
method='GET',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
# Handle APIC response
if query['status'] != 200:
self.response = query['msg']
self.status = query['status']
try:
# APIC error
self.response_json(query['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % query)
query = json.loads(resp.read())
return json.dumps(query['imdata'], sort_keys=True, indent=2) + '\n'
def request_diff(self, path, payload=None):
''' Perform a request, including a proper diff output '''
self.result['diff'] = dict()
self.result['diff']['before'] = self.query(path)
self.request(path, payload=payload)
# TODO: Check if we can use the request output for the 'after' diff
self.result['diff']['after'] = self.query(path)
if self.result['diff']['before'] != self.result['diff']['after']:
self.result['changed'] = True
# TODO: This could be designed to update existing keys
def update_qs(self, params):
''' Append key-value pairs to self.filter_string '''
accepted_params = dict((k, v) for (k, v) in params.items() if v is not None)
if accepted_params:
if self.filter_string:
self.filter_string += '&'
else:
self.filter_string = '?'
self.filter_string += '&'.join(['%s=%s' % (k, v) for (k, v) in accepted_params.items()])
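    # Illustrative example (parameter names are hypothetical): starting from an empty
    # filter_string, update_qs({'rsp-subtree': 'full', 'order-by': None}) leaves
    # self.filter_string == '?rsp-subtree=full' (None values are dropped); a later call
    # appends additional key-value pairs with '&'.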
# TODO: This could be designed to accept multiple obj_classes and keys
def build_filter(self, obj_class, params):
''' Build an APIC filter based on obj_class and key-value pairs '''
accepted_params = dict((k, v) for (k, v) in params.items() if v is not None)
if len(accepted_params) == 1:
return ','.join('eq({0}.{1},"{2}")'.format(obj_class, k, v) for (k, v) in accepted_params.items())
elif len(accepted_params) > 1:
return 'and(' + ','.join(['eq({0}.{1},"{2}")'.format(obj_class, k, v) for (k, v) in accepted_params.items()]) + ')'
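    # Illustrative examples (attribute values are hypothetical):
    #   build_filter('fvTenant', {'name': 'Example', 'descr': None})
    #     -> 'eq(fvTenant.name,"Example")'
    #   build_filter('fvTenant', {'name': 'Example', 'descr': 'demo'})
    #     -> 'and(eq(fvTenant.name,"Example"),eq(fvTenant.descr,"demo"))'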
def _deep_url_path_builder(self, obj):
target_class = obj['target_class']
target_filter = obj['target_filter']
subtree_class = obj['subtree_class']
subtree_filter = obj['subtree_filter']
object_rn = obj['object_rn']
mo = obj['module_object']
add_subtree_filter = obj['add_subtree_filter']
add_target_filter = obj['add_target_filter']
if self.module.params['state'] in ('absent', 'present') and mo is not None:
self.path = 'api/mo/uni/{0}.json'.format(object_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
else:
# State is 'query'
if object_rn is not None:
# Query for a specific object in the module's class
self.path = 'api/mo/uni/{0}.json'.format(object_rn)
else:
self.path = 'api/class/{0}.json'.format(target_class)
if add_target_filter:
self.update_qs(
{'query-target-filter': self.build_filter(target_class, target_filter)})
if add_subtree_filter:
self.update_qs(
{'rsp-subtree-filter': self.build_filter(subtree_class, subtree_filter)})
if 'port' in self.params and self.params['port'] is not None:
self.url = '{protocol}://{host}:{port}/{path}'.format(
path=self.path, **self.module.params)
else:
self.url = '{protocol}://{host}/{path}'.format(
path=self.path, **self.module.params)
if self.child_classes:
self.update_qs(
{'rsp-subtree': 'full', 'rsp-subtree-class': ','.join(sorted(self.child_classes))})
def _deep_url_parent_object(self, parent_objects, parent_class):
for parent_object in parent_objects:
if parent_object['aci_class'] is parent_class:
return parent_object
return None
def construct_deep_url(self, target_object, parent_objects=None, child_classes=None):
"""
This method is used to retrieve the appropriate URL path and filter_string to make the request to the APIC.
:param target_object: The target class dictionary containing parent_class, aci_class, aci_rn, target_filter, and module_object keys.
:param parent_objects: The parent class list of dictionaries containing parent_class, aci_class, aci_rn, target_filter, and module_object keys.
:param child_classes: The list of child classes that the module supports along with the object.
:type target_object: dict
:type parent_objects: list[dict]
:type child_classes: list[string]
:return: The path and filter_string needed to build the full URL.
"""
self.filter_string = ''
rn_builder = None
subtree_classes = None
add_subtree_filter = False
add_target_filter = False
has_target_query = False
has_target_query_compare = False
has_target_query_difference = False
has_target_query_called = False
if child_classes is None:
self.child_classes = set()
else:
self.child_classes = set(child_classes)
target_parent_class = target_object['parent_class']
target_class = target_object['aci_class']
target_rn = target_object['aci_rn']
target_filter = target_object['target_filter']
target_module_object = target_object['module_object']
url_path_object = dict(
target_class=target_class,
target_filter=target_filter,
subtree_class=target_class,
subtree_filter=target_filter,
module_object=target_module_object
)
if target_module_object is not None:
rn_builder = target_rn
else:
has_target_query = True
has_target_query_compare = True
if parent_objects is not None:
current_parent_class = target_parent_class
has_parent_query_compare = False
has_parent_query_difference = False
is_first_parent = True
is_single_parent = None
search_classes = set()
while current_parent_class != 'uni':
parent_object = self._deep_url_parent_object(
parent_objects=parent_objects, parent_class=current_parent_class)
if parent_object is not None:
parent_parent_class = parent_object['parent_class']
parent_class = parent_object['aci_class']
parent_rn = parent_object['aci_rn']
parent_filter = parent_object['target_filter']
parent_module_object = parent_object['module_object']
if is_first_parent:
is_single_parent = True
else:
is_single_parent = False
is_first_parent = False
if parent_parent_class != 'uni':
search_classes.add(parent_class)
if parent_module_object is not None:
if rn_builder is not None:
rn_builder = '{0}/{1}'.format(parent_rn,
rn_builder)
else:
rn_builder = parent_rn
url_path_object['target_class'] = parent_class
url_path_object['target_filter'] = parent_filter
has_target_query = False
else:
rn_builder = None
subtree_classes = search_classes
has_target_query = True
if is_single_parent:
has_parent_query_compare = True
current_parent_class = parent_parent_class
else:
raise ValueError("Reference error for parent_class '{0}'. Each parent_class must reference a valid object".format(current_parent_class))
if not has_target_query_difference and not has_target_query_called:
if has_target_query is not has_target_query_compare:
has_target_query_difference = True
else:
if not has_parent_query_difference and has_target_query is not has_parent_query_compare:
has_parent_query_difference = True
has_target_query_called = True
if not has_parent_query_difference and has_parent_query_compare and target_module_object is not None:
add_target_filter = True
elif has_parent_query_difference and target_module_object is not None:
add_subtree_filter = True
self.child_classes.add(target_class)
if has_target_query:
add_target_filter = True
elif has_parent_query_difference and not has_target_query and target_module_object is None:
self.child_classes.add(target_class)
self.child_classes.update(subtree_classes)
elif not has_parent_query_difference and not has_target_query and target_module_object is None:
self.child_classes.add(target_class)
elif not has_target_query and is_single_parent and target_module_object is None:
self.child_classes.add(target_class)
url_path_object['object_rn'] = rn_builder
url_path_object['add_subtree_filter'] = add_subtree_filter
url_path_object['add_target_filter'] = add_target_filter
self._deep_url_path_builder(url_path_object)
def construct_url(self, root_class, subclass_1=None, subclass_2=None, subclass_3=None, child_classes=None):
"""
This method is used to retrieve the appropriate URL path and filter_string to make the request to the APIC.
:param root_class: The top-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
        :param subclass_1: The second-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
        :param subclass_2: The third-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
        :param subclass_3: The fourth-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
:param child_classes: The list of child classes that the module supports along with the object.
:type root_class: dict
:type subclass_1: dict
:type subclass_2: dict
:type subclass_3: dict
:type child_classes: list
:return: The path and filter_string needed to build the full URL.
"""
self.filter_string = ''
if child_classes is None:
self.child_classes = set()
else:
self.child_classes = set(child_classes)
if subclass_3 is not None:
self._construct_url_4(root_class, subclass_1, subclass_2, subclass_3)
elif subclass_2 is not None:
self._construct_url_3(root_class, subclass_1, subclass_2)
elif subclass_1 is not None:
self._construct_url_2(root_class, subclass_1)
else:
self._construct_url_1(root_class)
if 'port' in self.params and self.params['port'] is not None:
self.url = '{protocol}://{host}:{port}/{path}'.format(path=self.path, **self.module.params)
else:
self.url = '{protocol}://{host}/{path}'.format(path=self.path, **self.module.params)
if self.child_classes:
        # Append child_classes to filter_string
self.update_qs({'rsp-subtree': 'full', 'rsp-subtree-class': ','.join(sorted(self.child_classes))})
def _construct_url_1(self, obj):
"""
This method is used by construct_url when the object is the top-level class.
"""
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}.json'.format(obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
elif mo is None:
# Query for all objects of the module's class (filter by properties)
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
else:
# Query for a specific object in the module's class
self.path = 'api/mo/uni/{0}.json'.format(obj_rn)
def _construct_url_2(self, parent, obj):
"""
This method is used by construct_url when the object is the second-level class.
"""
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['target_filter']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}/{1}.json'.format(parent_rn, obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
elif parent_obj is None and mo is None:
# Query for all objects of the module's class
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif parent_obj is None: # mo is known
# Query for all objects of the module's class that match the provided ID value
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif mo is None: # parent_obj is known
# Query for all object's of the module's class that belong to a specific parent object
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}.json'.format(parent_rn)
else:
# Query for specific object in the module's class
self.path = 'api/mo/uni/{0}/{1}.json'.format(parent_rn, obj_rn)
def _construct_url_3(self, root, parent, obj):
"""
This method is used by construct_url when the object is the third-level class.
"""
root_class = root['aci_class']
root_rn = root['aci_rn']
root_filter = root['target_filter']
root_obj = root['module_object']
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['target_filter']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
elif root_obj is None and parent_obj is None and mo is None:
# Query for all objects of the module's class
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif root_obj is None and parent_obj is None: # mo is known
# Query for all objects of the module's class matching the provided ID value of the object
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif root_obj is None and mo is None: # parent_obj is known
# Query for all objects of the module's class that belong to any parent class
# matching the provided ID value for the parent object
self.child_classes.add(obj_class)
self.path = 'api/class/{0}.json'.format(parent_class)
self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
elif parent_obj is None and mo is None: # root_obj is known
# Query for all objects of the module's class that belong to a specific root object
self.child_classes.update([parent_class, obj_class])
self.path = 'api/mo/uni/{0}.json'.format(root_rn)
# NOTE: No need to select by root_filter
# self.update_qs({'query-target-filter': self.build_filter(root_class, root_filter)})
elif root_obj is None: # mo and parent_obj are known
# Query for all objects of the module's class that belong to any parent class
# matching the provided ID values for both object and parent object
self.child_classes.add(obj_class)
self.path = 'api/class/{0}.json'.format(parent_class)
self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif parent_obj is None: # mo and root_obj are known
# Query for all objects of the module's class that match the provided ID value and belong to a specific root object
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}.json'.format(root_rn)
# NOTE: No need to select by root_filter
# self.update_qs({'query-target-filter': self.build_filter(root_class, root_filter)})
# TODO: Filter by parent_filter and obj_filter
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif mo is None: # root_obj and parent_obj are known
# Query for all objects of the module's class that belong to a specific parent object
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}/{1}.json'.format(root_rn, parent_rn)
# NOTE: No need to select by parent_filter
# self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
else:
# Query for a specific object of the module's class
self.path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
def _construct_url_4(self, root, sec, parent, obj):
"""
This method is used by construct_url when the object is the fourth-level class.
"""
root_class = root['aci_class']
root_rn = root['aci_rn']
root_filter = root['target_filter']
root_obj = root['module_object']
sec_class = sec['aci_class']
sec_rn = sec['aci_rn']
sec_filter = sec['target_filter']
sec_obj = sec['module_object']
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['target_filter']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
        if self.child_classes is None:
            # Use a set so the later .add() calls work
            self.child_classes = set([obj_class])
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}/{1}/{2}/{3}.json'.format(root_rn, sec_rn, parent_rn, obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
# TODO: Add all missing cases
elif root_obj is None:
self.child_classes.add(obj_class)
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif sec_obj is None:
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}.json'.format(root_rn)
# NOTE: No need to select by root_filter
# self.update_qs({'query-target-filter': self.build_filter(root_class, root_filter)})
# TODO: Filter by sec_filter, parent and obj_filter
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif parent_obj is None:
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}/{1}.json'.format(root_rn, sec_rn)
# NOTE: No need to select by sec_filter
# self.update_qs({'query-target-filter': self.build_filter(sec_class, sec_filter)})
# TODO: Filter by parent_filter and obj_filter
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif mo is None:
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, sec_rn, parent_rn)
# NOTE: No need to select by parent_filter
# self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
else:
# Query for a specific object of the module's class
self.path = 'api/mo/uni/{0}/{1}/{2}/{3}.json'.format(root_rn, sec_rn, parent_rn, obj_rn)
def delete_config(self):
"""
        This method is used to handle the logic when the module's state is equal to absent. The method only pushes a change if
the object exists, and if check_mode is False. A successful change will mark the module as changed.
"""
self.proposed = dict()
if not self.existing:
return
elif not self.module.check_mode:
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(method='DELETE')
resp, info = fetch_url(self.module, self.url,
headers=self.headers,
method='DELETE',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
self.method = 'DELETE'
# Handle APIC response
if info['status'] == 200:
self.result['changed'] = True
self.response_json(resp.read())
else:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
else:
self.result['changed'] = True
self.method = 'DELETE'
def get_diff(self, aci_class):
"""
This method is used to get the difference between the proposed and existing configurations. Each module
should call the get_existing method before this method, and add the proposed config to the module results
        using the module's config parameters. The new config will be added to the self.result dictionary.
:param aci_class: Type str.
This is the root dictionary key for the MO's configuration body, or the ACI class of the MO.
"""
proposed_config = self.proposed[aci_class]['attributes']
if self.existing:
existing_config = self.existing[0][aci_class]['attributes']
config = {}
# values are strings, so any diff between proposed and existing can be a straight replace
for key, value in proposed_config.items():
existing_field = existing_config.get(key)
if value != existing_field:
config[key] = value
# add name back to config only if the configs do not match
if config:
# TODO: If URLs are built with the object's name, then we should be able to leave off adding the name back
# config["name"] = proposed_config["name"]
config = {aci_class: {'attributes': config}}
# check for updates to child configs and update new config dictionary
children = self.get_diff_children(aci_class)
if children and config:
config[aci_class].update({'children': children})
elif children:
config = {aci_class: {'attributes': {}, 'children': children}}
else:
config = self.proposed
self.config = config
@staticmethod
def get_diff_child(child_class, proposed_child, existing_child):
"""
        This method is used to get the difference between proposed and existing child configs. The get_nested_config()
        method should be used to return the proposed and existing config portions of the child.
:param child_class: Type str.
The root class (dict key) for the child dictionary.
:param proposed_child: Type dict.
The config portion of the proposed child dictionary.
:param existing_child: Type dict.
The config portion of the existing child dictionary.
:return: The child config with only values that are updated. If the proposed dictionary has no updates to make
to what exists on the APIC, then None is returned.
"""
update_config = {child_class: {'attributes': {}}}
for key, value in proposed_child.items():
existing_field = existing_child.get(key)
if value != existing_field:
update_config[child_class]['attributes'][key] = value
if not update_config[child_class]['attributes']:
return None
return update_config
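    # Illustrative example (class and attribute names are hypothetical):
    #   get_diff_child('fvRsBd', {'tnFvBDName': 'web', 'descr': ''},
    #                            {'tnFvBDName': 'db', 'descr': ''})
    #     -> {'fvRsBd': {'attributes': {'tnFvBDName': 'web'}}}
    #   None is returned when no attribute differs.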
def get_diff_children(self, aci_class):
"""
This method is used to retrieve the updated child configs by comparing the proposed children configs
        against the object's existing children configs.
:param aci_class: Type str.
This is the root dictionary key for the MO's configuration body, or the ACI class of the MO.
:return: The list of updated child config dictionaries. None is returned if there are no changes to the child
configurations.
"""
proposed_children = self.proposed[aci_class].get('children')
if proposed_children:
child_updates = []
existing_children = self.existing[0][aci_class].get('children', [])
# Loop through proposed child configs and compare against existing child configuration
for child in proposed_children:
child_class, proposed_child, existing_child = self.get_nested_config(child, existing_children)
if existing_child is None:
child_update = child
else:
child_update = self.get_diff_child(child_class, proposed_child, existing_child)
# Update list of updated child configs only if the child config is different than what exists
if child_update:
child_updates.append(child_update)
else:
return None
return child_updates
def get_existing(self):
"""
This method is used to get the existing object(s) based on the path specified in the module. Each module should
build the URL so that if the object's name is supplied, then it will retrieve the configuration for that particular
        object, but if no name is supplied, then it will retrieve all MOs for the class. Following this pattern ensures
that this method can be used to supply the existing configuration when using the get_diff method. The response, status,
and existing configuration will be added to the self.result dictionary.
"""
uri = self.url + self.filter_string
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(path=self.path + self.filter_string, method='GET')
resp, info = fetch_url(self.module, uri,
headers=self.headers,
method='GET',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
self.method = 'GET'
# Handle APIC response
if info['status'] == 200:
self.existing = json.loads(resp.read())['imdata']
else:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
@staticmethod
def get_nested_config(proposed_child, existing_children):
"""
        This method is used for stripping off the outer layers of the child dictionaries so only the configuration
        key-value pairs are returned.
:param proposed_child: Type dict.
The dictionary that represents the child config.
:param existing_children: Type list.
The list of existing child config dictionaries.
:return: The child's class as str (root config dict key), the child's proposed config dict, and the child's
existing configuration dict.
"""
for key in proposed_child.keys():
child_class = key
proposed_config = proposed_child[key]['attributes']
existing_config = None
# FIXME: Design causes issues for repeated child_classes
# get existing dictionary from the list of existing to use for comparison
for child in existing_children:
if child.get(child_class):
existing_config = child[key]['attributes']
# NOTE: This is an ugly fix
# Return the one that is a subset match
if set(proposed_config.items()).issubset(set(existing_config.items())):
break
return child_class, proposed_config, existing_config
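    # Illustrative example (class and attribute names are hypothetical):
    #   get_nested_config({'fvRsBd': {'attributes': {'tnFvBDName': 'web'}}},
    #                     [{'fvRsBd': {'attributes': {'tnFvBDName': 'db'}}}])
    #     -> ('fvRsBd', {'tnFvBDName': 'web'}, {'tnFvBDName': 'db'})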
def payload(self, aci_class, class_config, child_configs=None):
"""
This method is used to dynamically build the proposed configuration dictionary from the config related parameters
        passed into the module. All values that were not passed from the playbook task will be removed so as not to
inadvertently change configurations.
:param aci_class: Type str
This is the root dictionary key for the MO's configuration body, or the ACI class of the MO.
:param class_config: Type dict
This is the configuration of the MO using the dictionary keys expected by the API
:param child_configs: Type list
This is a list of child dictionaries associated with the MOs config. The list should only
include child objects that are used to associate two MOs together. Children that represent
MOs should have their own module.
"""
proposed = dict((k, str(v)) for k, v in class_config.items() if v is not None)
self.proposed = {aci_class: {'attributes': proposed}}
# add child objects to proposed
if child_configs:
children = []
for child in child_configs:
child_copy = deepcopy(child)
has_value = False
for root_key in child_copy.keys():
for final_keys, values in child_copy[root_key]['attributes'].items():
if values is None:
child[root_key]['attributes'].pop(final_keys)
else:
child[root_key]['attributes'][final_keys] = str(values)
has_value = True
if has_value:
children.append(child)
if children:
self.proposed[aci_class].update(dict(children=children))
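    # Illustrative example (class and parameter names are hypothetical):
    #   self.payload('fvTenant', class_config=dict(name='example', descr=None))
    #   sets self.proposed to {'fvTenant': {'attributes': {'name': 'example'}}}
    #   (None values are dropped and the remaining values are cast to str).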
def post_config(self):
"""
        This method is used to handle the logic when the module's state is equal to present. The method only pushes a change if
        the object differs from what exists on the APIC, and if check_mode is False. A successful change will mark the
module as changed.
"""
if not self.config:
return
elif not self.module.check_mode:
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(method='POST', payload=json.dumps(self.config))
resp, info = fetch_url(self.module, self.url,
data=json.dumps(self.config),
headers=self.headers,
method='POST',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
self.method = 'POST'
# Handle APIC response
if info['status'] == 200:
self.result['changed'] = True
self.response_json(resp.read())
else:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
else:
self.result['changed'] = True
self.method = 'POST'
def exit_json(self, **kwargs):
if 'state' in self.params:
if self.params['state'] in ('absent', 'present'):
if self.params['output_level'] in ('debug', 'info'):
self.result['previous'] = self.existing
# Return the gory details when we need it
if self.params['output_level'] == 'debug':
if 'state' in self.params:
self.result['filter_string'] = self.filter_string
self.result['method'] = self.method
# self.result['path'] = self.path # Adding 'path' in result causes state: absent in output
self.result['response'] = self.response
self.result['status'] = self.status
self.result['url'] = self.url
if 'state' in self.params:
self.original = self.existing
if self.params['state'] in ('absent', 'present'):
self.get_existing()
# if self.module._diff and self.original != self.existing:
# self.result['diff'] = dict(
# before=json.dumps(self.original, sort_keys=True, indent=4),
# after=json.dumps(self.existing, sort_keys=True, indent=4),
# )
self.result['current'] = self.existing
if self.params['output_level'] in ('debug', 'info'):
self.result['sent'] = self.config
self.result['proposed'] = self.proposed
self.result.update(**kwargs)
self.module.exit_json(**self.result)
def fail_json(self, msg, **kwargs):
# Return error information, if we have it
if self.error['code'] is not None and self.error['text'] is not None:
self.result['error'] = self.error
if 'state' in self.params:
if self.params['state'] in ('absent', 'present'):
if self.params['output_level'] in ('debug', 'info'):
self.result['previous'] = self.existing
# Return the gory details when we need it
if self.params['output_level'] == 'debug':
if self.imdata is not None:
self.result['imdata'] = self.imdata
self.result['totalCount'] = self.totalCount
if self.params['output_level'] == 'debug':
if self.url is not None:
if 'state' in self.params:
self.result['filter_string'] = self.filter_string
self.result['method'] = self.method
# self.result['path'] = self.path # Adding 'path' in result causes state: absent in output
self.result['response'] = self.response
self.result['status'] = self.status
self.result['url'] = self.url
if 'state' in self.params:
if self.params['output_level'] in ('debug', 'info'):
self.result['sent'] = self.config
self.result['proposed'] = self.proposed
self.result.update(**kwargs)
self.module.fail_json(msg=msg, **self.result)
|
py | 1a4bd3e91bc8b93f1288b7f52bf54923381f1caf | import tensorflow as tf # noqa
import copy
import os
import cPickle as pickle
import numpy as np
import hashlib
from ..data import helpers as helpers
from ..utils import misc as misc
from ..data import batch_fetcher as bfetchers
from ..experiments import experiment
from ..experiments import config as econfig
from ..model import conditionals as conds
from ..model import transforms as trans # noqa
from ..model import likelihoods as likes # noqa
from datetime import datetime
# Hyperparameters.
DEF_ARGS = {
'train_iters': 30000,
'hold_iters': 100,
'hold_interval': 2500,
'ncomps': 40,
'decay_interval': 5000,
'dropout_keeprate_val': None,
'optimizer_class': tf.train.AdamOptimizer,
'momentum': None,
'momentum_iter': 5000,
'max_grad_norm': 1.0,
'trans_alpha': None,
'rescale_init_constant': 1.0,
'trans_state_activation': tf.nn.tanh,
'cond_param_irange': 1e-6,
'first_do_linear_map': True,
'standardize': True,
'base_distribution': 'gaussian',
}
# Base configs for different transformations.
BASE_ARG_CHOICES = {
'lr_decay': (0.5, 0.1),
'init_lr': (0.005, ),
'first_trainable_A': (True, False),
'trans_funcs': [
None,
[trans.additive_coupling, trans.reverse, trans.additive_coupling,
trans.reverse, trans.additive_coupling, trans.reverse,
trans.additive_coupling, trans.log_rescale], # NICE Type
[trans.simple_rnn_transform, ], # 1xRNN
[trans.simple_rnn_transform, trans.reverse,
trans.simple_rnn_transform], # 2xRNN
[trans.rnn_coupling, trans.reverse, trans.rnn_coupling, trans.reverse,
trans.rnn_coupling, trans.reverse, trans.rnn_coupling,
trans.log_rescale], # 4xRNN Coup
[trans.simple_rnn_transform, trans.reverse,
trans.rnn_coupling, trans.reverse, trans.rnn_coupling, trans.reverse,
trans.rnn_coupling, trans.reverse, trans.rnn_coupling,
trans.log_rescale], # 1xRNN + RNN Coupling
[trans.simple_rnn_transform, trans.reverse, trans.additive_coupling,
trans.reverse, trans.additive_coupling, trans.reverse,
trans.additive_coupling, trans.reverse, trans.additive_coupling,
trans.log_rescale], # 1xRNN + NICE
],
}
# Get configs for standard Gaussian conditional model.
ARG_CHOICES_STDGAU = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_STDGAU['single_marginal'] = (True,)
ARG_CHOICES_STDGAU['standard'] = (True,)
ARG_CHOICES_STDGAU['ncomps'] = (1, )
ARG_CHOICES_STDGAU['cond_func'] = (conds.independent_model,)
ARG_LIST_STDGAU = misc.make_arguments(ARG_CHOICES_STDGAU)
ARG_LIST_STDGAU = filter(
lambda conf: conf['first_trainable_A'] or conf['trans_funcs'] is not None,
ARG_LIST_STDGAU) # Avoid models that have no variables to optimize.
# Get configs for independent GMMs
ARG_CHOICES_IND = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_IND['single_marginal'] = (False,)
ARG_CHOICES_IND['standard'] = (False,)
ARG_CHOICES_IND['cond_func'] = (conds.independent_model,)
ARG_LIST_IND = misc.make_arguments(ARG_CHOICES_IND)
# Get config for Tied conditional model.
ARG_CHOICES_TIED = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_TIED['cond_tied_model'] = (True,)
ARG_CHOICES_TIED['param_nlayers'] = (2,)
ARG_CHOICES_TIED['cond_func'] = (conds.cond_model,)
ARG_LIST_TIED = misc.make_arguments(ARG_CHOICES_TIED)
# Get config for Untied conditional model.
ARG_CHOICES_UNTIED = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_UNTIED['cond_tied_model'] = (False,)
ARG_CHOICES_UNTIED['param_nlayers'] = (2,)
ARG_CHOICES_UNTIED['cond_func'] = (conds.cond_model,)
ARG_LIST_UNTIED = misc.make_arguments(ARG_CHOICES_UNTIED)
# Get config for RNN conditional model.
ARG_CHOICES_RNN = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_RNN['param_nlayers'] = (None, 2)
ARG_CHOICES_RNN['cond_func'] = (conds.rnn_model,)
ARG_LIST_RNN = misc.make_arguments(ARG_CHOICES_RNN)
# Get config for RNN conditional model.
ARG_CHOICES_RNN_FC = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_RNN_FC['param_nlayers'] = (2, )
ARG_CHOICES_RNN_FC['cond_func'] = (conds.rnn_model,)
ARG_LIST_RNN_FC = misc.make_arguments(ARG_CHOICES_RNN_FC)
# Make the default be RNN conditional models.
ARG_LIST = misc.make_arguments(ARG_CHOICES_RNN)
def shorten(obj):
""" Helper function to shorten stringeds from long options, uses hash to
ensure shortening without collision """
string = str(obj)
if len(string) >= 255:
hash_object = hashlib.md5(string)
string_hash = str(hash_object.hexdigest())
return string[:50] + '...' + string[-50:] + '_' + string_hash
return string
def print_value(value):
""" Helper function to print functions, lists, and dictionaries for
filenames and printouts. """
if isinstance(value, str):
return value
try:
try:
string = reduce(lambda x, y: x+'-'+y,
[print_value(v) for v in value.items()])
except AttributeError: # Not dictionary
string = reduce(
lambda x, y: x+','+y, [print_value(v) for v in value])
except TypeError: # Not iterable
try:
string = value.func_name
except AttributeError: # Not function
string = str(value)
return string
def get_exp_name(args):
sorted_keys = np.sort(args.keys())
exp_name = reduce(lambda x, y: x+y,
['{}--{}/'.format(k, shorten(print_value(args[k])))
for k in sorted_keys], '')
return exp_name
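# Illustrative example (argument names are hypothetical):
#   get_exp_name({'init_lr': 0.005, 'lr_decay': 0.5})
#     -> 'init_lr--0.005/lr_decay--0.5/'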
def make_trainer(dataset, base_save_path, base_log_path,
nepochs=None, exp_class=experiment.Experiment,
fetcher_class=bfetchers.DatasetFetchers, **kwargs):
# Options.
# Load data.
# TODO: general data load
if isinstance(dataset, str):
print('Loading {}...'.format(dataset))
dataset = pickle.load(open(dataset, 'rb'))
print('Loaded.')
# Make the data fetchers.
if 'train_labels' in dataset and 'valid_labels' in dataset and \
'test_labels' in dataset:
# Labeled data.
fetchers = fetcher_class(
(dataset['train'], dataset['train_labels']),
(dataset['valid'], dataset['valid_labels']),
(dataset['test'], dataset['test_labels']))
else:
fetchers = fetcher_class(
(dataset['train'],), (dataset['valid'],), (dataset['test'],))
def main(args):
        # Make config for the trial with default and given arguments.
trial_args = copy.copy(kwargs)
for ind in args:
trial_args[ind] = args[ind]
# Data preprocessing
standardize = misc.get_default(trial_args, 'standardize', False)
cov_func = misc.get_default(trial_args, 'cov_func', None)
trial_args['first_do_linear_map'] = misc.get_default(
trial_args, 'first_do_linear_map', False)
# Get initial linear map parameters.
if trial_args['first_do_linear_map']:
try:
(imp, ib, ip) = helpers.get_initmap(
dataset['train'], standardize=standardize,
cov_func=cov_func)
trial_args['first_init_mat_params'] = imp
trial_args['first_init_b'] = ib
trial_args['first_perm'] = ip
except (TypeError, ValueError) as error:
print('No initial linear parameters due to error:\n{}'.format(
error))
        # Determine the number of training iterations from nepochs
trial_args['batch_size'] = misc.get_default(
trial_args, 'batch_size', 256)
if nepochs is not None:
N, d = dataset['train'].shape
iters_per_epoch = N/float(trial_args['batch_size'])
trial_args['train_iters'] = int(nepochs*iters_per_epoch)
config = econfig.RedConfig(**trial_args)
# Make directories specific to experiment trial.
if base_save_path is not None:
save_path = os.path.join(base_save_path, get_exp_name(args))
misc.make_path(save_path)
else:
            raise AttributeError('Must provide save path for validating model')
if base_log_path is not None:
log_path = os.path.join(base_log_path, get_exp_name(args))
misc.make_path(log_path)
else:
log_path = None
# Save config for easy model loading.
try:
pickle.dump(
trial_args, open(os.path.join(save_path, 'trial_args.p'), 'wb'))
except TypeError:
print('Could not save trial arg pickle file.')
# Set up trial and train.
exp = exp_class(
config, log_path, save_path, fetchers)
with exp.graph.as_default():
res_dicts = exp.main()
# Save results.
if log_path is not None:
pickle.dump(
res_dicts, open(os.path.join(log_path, 'result.p'), 'wb'))
else:
pickle.dump(
res_dicts, open(os.path.join(save_path, 'result.p'), 'wb'))
return res_dicts
return main
def invalid_result(result):
return result is None or np.isnan(result['loss'])
def run_experiment(data, arg_list=ARG_LIST, def_args=DEF_ARGS,
exp_class=experiment.Experiment,
fetcher_class=bfetchers.DatasetFetchers,
estimator='TAN', retries=1,
log_path=None, save_path=None, experiments_name=None,
no_log=False):
# Set up paths.
if log_path is None or save_path is None:
home = os.path.expanduser('~')
data_name = os.path.basename(data)
experiments_name = \
experiments_name if experiments_name is not None else \
datetime.now().strftime('%Y_%m_%d_%H:%M:%S.%f')
log_path = log_path if log_path is not None else \
os.path.join(
home, 'de_logs', estimator, data_name, experiments_name)
save_path = save_path if save_path is not None else \
os.path.join(
home, 'de_models', estimator, data_name, experiments_name)
if no_log:
log_path = None
else:
misc.make_path(log_path)
misc.make_path(save_path)
print('log path: {}\nsave path: {}'.format(log_path, save_path))
# Get results for all hyperparameter choices
main = make_trainer(data, save_path, log_path, exp_class=exp_class,
fetcher_class=fetcher_class, **def_args)
if no_log:
log_path = save_path
results = []
best = None
for ai in range(len(arg_list)):
args = arg_list[ai]
retries_left = retries
print('RUNNING {}'.format(experiments_name))
print('[{}/{}] {}'.format(ai+1, len(arg_list), args))
results.append(main(args))
while invalid_result(results[-1]) and retries_left > 0:
print('[{}/{}] Retrying {}'.format(ai+1, len(arg_list), args))
retries_left -= 1
results[-1] = main(args)
better_result = not invalid_result(results[-1]) and (
invalid_result(best) or best['loss'] > results[-1]['loss']
)
if better_result:
best = {}
best['loss'] = results[-1]['loss']
best['results'] = results[-1]
best['args'] = args
pickle.dump(
{'best': best, 'trial_results': results,
'trial_args': arg_list[:ai+1]},
open(os.path.join(log_path, experiments_name+'_all_trials.p'),
'wb'))
if best is not None:
best['save_path'] = save_path
best['log_path'] = log_path
best['def_args'] = def_args
pickle.dump(
best,
open(os.path.join(save_path, experiments_name+'_best_trial.p'), 'wb'))
return best, results
|
py | 1a4bd3f29af7565c6ef504fb9d0385081f6f618f | import warnings
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, Aer, transpile, assemble
from qiskit.tools.monitor import job_monitor
from qiskit.circuit.library import QFT
from qiskit.visualization import plot_histogram, plot_bloch_multivector
warnings.filterwarnings("ignore", category=DeprecationWarning)
import numpy as np
pi = np.pi
def qft_dagger(qc, n):
"""n-qubit QFTdagger the first n qubits in circ"""
# Don't forget the Swaps!
for qubit in range(n//2):
qc.swap(qubit, n-qubit-1)
for j in range(n):
for m in range(j):
qc.cp(-pi/float(2**(j-m)), m, j)
qc.h(j)
def generalised_qpe(amt_estimation_qubits, angle, shots=10000):
go = True
while go:
# Create and set up circuit
qpe3 = QuantumCircuit(amt_estimation_qubits+1, amt_estimation_qubits)
# Apply H-Gates to counting qubits:
for qubit in range(amt_estimation_qubits):
qpe3.h(qubit)
# Prepare our eigenstate |psi>:
repetitions = 1
for counting_qubit in range(amt_estimation_qubits):
for i in range(repetitions):
qpe3.cp(angle, counting_qubit, amt_estimation_qubits);
repetitions *= 2
# Do the inverse QFT:
qft_dagger(qpe3, amt_estimation_qubits)
# Measure of course!
qpe3.barrier()
for n in range(amt_estimation_qubits):
qpe3.measure(n,n)
aer_sim = Aer.get_backend('aer_simulator')
t_qpe3 = transpile(qpe3, aer_sim)
qobj = assemble(t_qpe3, shots=shots)
results = aer_sim.run(qobj).result()
answer = results.get_counts()
answer2 = {int(k,2)/2**amt_estimation_qubits: v for k, v in answer.items()}
print(answer2)
try:
freq = answer.most_frequent()
go = False
        except Exception:
pass
#print("Most frequent '" + answer.most_frequent() + "'")
print("Approx rotation angle by Z from the unitary in degrees '" + str(360 * int(answer.most_frequent(), 2)/2**amt_estimation_qubits) + "'")
#print("Phase Calculation " + answer.most_frequent())
##return(plot_histogram(answer))
##comment out the return if you want to see the histogram
return((int(answer.most_frequent(), 2)/2**amt_estimation_qubits))
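# Illustrative usage (the angle value is hypothetical):
#   phase = generalised_qpe(amt_estimation_qubits=4, angle=2*pi/3, shots=10000)
#   prints the raw counts and an approximate rotation angle in degrees, then returns the
#   most frequent phase estimate as a fraction in [0, 1).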
|
py | 1a4bd5247ae4fb19c735c646b8c423b47696e7eb | #!/home/moringa/Documents/django/hood/virtual/bin/python3.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
py | 1a4bd59ddce6bfa11a52fb80dd683bd52cde6302 | """
ASGI config for mysite1 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dj4e-sample.settings')
application = get_asgi_application()
|
py | 1a4bd5c6403da47af75a2b0f0eb9bfa2525e94d8 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import os
import json
import copy
import argparse
import numpy as np
from functools import partial
from collections import defaultdict
import paddle
from paddle import inference
from paddlenlp.datasets import load_dataset, MapDataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import SkepTokenizer
from utils import decoding, read_test_file, load_dict
from extraction.data import convert_example_to_feature as convert_example_to_feature_ext
from classification.data import convert_example_to_feature as convert_example_to_feature_cls
class Predictor(object):
def __init__(self, args):
self.args = args
self.ext_predictor, self.ext_input_handles, self.ext_output_hanle = self.create_predictor(
args.ext_model_path)
print(f"ext_model_path: {args.ext_model_path}, {self.ext_predictor}")
self.cls_predictor, self.cls_input_handles, self.cls_output_hanle = self.create_predictor(
args.cls_model_path)
self.ext_label2id, self.ext_id2label = load_dict(args.ext_label_path)
self.cls_label2id, self.cls_id2label = load_dict(args.cls_label_path)
self.tokenizer = SkepTokenizer.from_pretrained(args.base_model_name)
def create_predictor(self, model_path):
model_file = model_path + ".pdmodel"
params_file = model_path + ".pdiparams"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
raise ValueError("not find params file path {}".format(params_file))
config = paddle.inference.Config(model_file, params_file)
if self.args.device == "gpu":
# set GPU configs accordingly
            # such as initialize the gpu memory, enable tensorrt
config.enable_use_gpu(100, 0)
precision_map = {
"fp16": inference.PrecisionType.Half,
"fp32": inference.PrecisionType.Float32,
"int8": inference.PrecisionType.Int8
}
precision_mode = precision_map[args.precision]
if args.use_tensorrt:
config.enable_tensorrt_engine(
max_batch_size=self.args.batch_size,
min_subgraph_size=30,
precision_mode=precision_mode)
elif self.args.device == "cpu":
# set CPU configs accordingly,
# such as enable_mkldnn, set_cpu_math_library_num_threads
config.disable_gpu()
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_threads)
elif self.args.device == "xpu":
# set XPU configs accordingly
config.enable_xpu(100)
config.switch_use_feed_fetch_ops(False)
predictor = paddle.inference.create_predictor(config)
input_handles = [
predictor.get_input_handle(name)
for name in predictor.get_input_names()
]
output_handle = predictor.get_output_handle(predictor.get_output_names()
[0])
return predictor, input_handles, output_handle
def predict_ext(self, args):
ori_test_ds = load_dataset(
read_test_file, data_path=args.test_path, lazy=False)
trans_func = partial(
convert_example_to_feature_ext,
tokenizer=self.tokenizer,
label2id=self.ext_label2id,
max_seq_len=args.max_seq_len,
is_test=True)
test_ds = copy.copy(ori_test_ds).map(trans_func, lazy=False)
batch_list = [
test_ds[idx:idx + args.batch_size]
for idx in range(0, len(test_ds), args.batch_size)
]
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=self.tokenizer.pad_token_id),
Pad(axis=0, pad_val=self.tokenizer.pad_token_type_id),
Stack(dtype="int64"), ): fn(samples)
results = []
for bid, batch_data in enumerate(batch_list):
input_ids, token_type_ids, seq_lens = batchify_fn(batch_data)
self.ext_input_handles[0].copy_from_cpu(input_ids)
self.ext_input_handles[1].copy_from_cpu(token_type_ids)
self.ext_predictor.run()
logits = self.ext_output_hanle.copy_to_cpu()
predictions = logits.argmax(axis=2)
for eid, (seq_len,
prediction) in enumerate(zip(seq_lens, predictions)):
idx = bid * args.batch_size + eid
tag_seq = [
self.ext_id2label[idx] for idx in prediction[:seq_len][1:-1]
]
text = ori_test_ds[idx]["text"]
aps = decoding(text, tag_seq)
for aid, ap in enumerate(aps):
aspect, opinions = ap[0], list(set(ap[1:]))
aspect_text = self._concate_aspect_and_opinion(text, aspect,
opinions)
results.append({
"id": str(idx) + "_" + str(aid),
"aspect": aspect,
"opinions": opinions,
"text": text,
"aspect_text": aspect_text
})
return results
def predict_cls(self, args, ext_results):
test_ds = MapDataset(ext_results)
trans_func = partial(
convert_example_to_feature_cls,
tokenizer=self.tokenizer,
label2id=self.cls_label2id,
max_seq_len=args.max_seq_len,
is_test=True)
test_ds = test_ds.map(trans_func, lazy=False)
batch_list = [
test_ds[idx:idx + args.batch_size]
for idx in range(0, len(test_ds), args.batch_size)
]
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=self.tokenizer.pad_token_id),
Pad(axis=0, pad_val=self.tokenizer.pad_token_type_id),
Stack(dtype="int64")): fn(samples)
results = []
for batch_data in batch_list:
input_ids, token_type_ids, _ = batchify_fn(batch_data)
self.cls_input_handles[0].copy_from_cpu(input_ids)
self.cls_input_handles[1].copy_from_cpu(token_type_ids)
self.cls_predictor.run()
logits = self.cls_output_hanle.copy_to_cpu()
predictions = logits.argmax(axis=1).tolist()
results.extend(predictions)
return results
def post_process(self, args, ext_results, cls_results):
assert len(ext_results) == len(cls_results)
collect_dict = defaultdict(list)
for ext_result, cls_result in zip(ext_results, cls_results):
ext_result["sentiment_polarity"] = self.cls_id2label[cls_result]
eid, _ = ext_result["id"].split("_")
collect_dict[eid].append(ext_result)
sentiment_results = []
for eid in collect_dict.keys():
sentiment_result = {}
ap_list = []
for idx, single_ap in enumerate(collect_dict[eid]):
if idx == 0:
sentiment_result["text"] = single_ap["text"]
ap_list.append({
"aspect": single_ap["aspect"],
"opinions": single_ap["opinions"],
"sentiment_polarity": single_ap["sentiment_polarity"]
})
sentiment_result["ap_list"] = ap_list
sentiment_results.append(sentiment_result)
with open(args.save_path, "w", encoding="utf-8") as f:
for sentiment_result in sentiment_results:
f.write(json.dumps(sentiment_result, ensure_ascii=False) + "\n")
print(
f"sentiment analysis results has been saved to path: {args.save_path}"
)
def predict(self, args):
ext_results = self.predict_ext(args)
cls_results = self.predict_cls(args, ext_results)
self.post_process(args, ext_results, cls_results)
def _concate_aspect_and_opinion(self, text, aspect, opinion_words):
aspect_text = ""
for opinion_word in opinion_words:
if text.find(aspect) <= text.find(opinion_word):
aspect_text += aspect + opinion_word + ","
else:
aspect_text += opinion_word + aspect + ","
aspect_text = aspect_text[:-1]
return aspect_text
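    # Illustrative example (the text is hypothetical):
    #   self._concate_aspect_and_opinion('the screen is bright', 'screen', ['bright'])
    #     -> 'screenbright' (the aspect appears before the opinion word in the text)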
if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--base_model_name", default='skep_ernie_1.0_large_ch', type=str, help="Base model name, SKEP used by default", )
parser.add_argument("--ext_model_path", type=str, default=None, help="The path of extraction model path that you want to load.")
parser.add_argument("--cls_model_path", type=str, default=None, help="The path of classification model path that you want to load.")
parser.add_argument("--ext_label_path", type=str, default=None, help="The path of extraction label dict.")
parser.add_argument("--cls_label_path", type=str, default=None, help="The path of classification label dict.")
parser.add_argument('--test_path', type=str, default=None, help="The path of test set that you want to predict.")
parser.add_argument('--save_path', type=str, required=True, default=None, help="The saving path of predict results.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_len", default=256, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--use_tensorrt", action='store_true', help="Whether to use inference engin TensorRT.")
parser.add_argument("--precision", default="fp32", type=str, choices=["fp32", "fp16", "int8"],help='The tensorrt precision.')
parser.add_argument("--device", default="gpu", choices=["gpu", "cpu", "xpu"], help="Device selected for inference.")
parser.add_argument('--cpu_threads', default=10, type=int, help='Number of threads to predict when using cpu.')
parser.add_argument('--enable_mkldnn', default=False, type=eval, choices=[True, False], help='Enable to use mkldnn to speed up when using cpu.')
args = parser.parse_args()
    # yapf: enable
predictor = Predictor(args)
predictor.predict(args)
|
py | 1a4bd638e390748ebf8b45393394f4325ab2f78e | #!/usr/bin/python
# coding=utf-8
""" Fixtures for Pytest Unit tests
:usage:
Called by unit tests.
:authors:
JP at 09/01/20
"""
import pytest
from app.imaging import read_image, get_distortion_array
@pytest.fixture
def client():
"""Flask test client with Google Cloud logging client removed."""
from main import create_app
app = create_app()
return app.test_client()
@pytest.fixture
def img_array():
return read_image()
@pytest.fixture
def dist_array():
return get_distortion_array()
|
py | 1a4bd75b26ec795fff46ced5766e5e8581b0b486 | import configargparse
|
py | 1a4bd9e09bc664704289c7382f698c94f1bcaf7e | import atexit
from datetime import date, datetime, timedelta
import io
import json
import logging
import os
import os.path
import random
import re
import requests
import subprocess
from sys import platform as _platform
import time
import zipfile
import imaplib
import email
import email.header
import sys # DEBUG
import warnings
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import BytesIO as StringIO # Python 3
from selenium.common.exceptions import ElementNotInteractableException, ElementNotVisibleException, NoSuchElementException, StaleElementReferenceException, TimeoutException
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.remote.webelement import WebElement
from seleniumrequests import Chrome
import xmltodict
try:
import pandas as pd
except ImportError:
pd = None
logger = logging.getLogger('mintapi')
logger.setLevel(logging.INFO)
def assert_pd():
# Common function to check if pd is installed
if not pd:
raise ImportError(
'transactions data requires pandas; '
'please pip install pandas'
)
def json_date_to_datetime(dateraw):
cy = date.today().year
try:
newdate = datetime.strptime(dateraw + str(cy), '%b %d%Y')
except ValueError:
newdate = convert_mmddyy_to_datetime(dateraw)
return newdate
def convert_mmddyy_to_datetime(date):
try:
newdate = datetime.strptime(date, '%m/%d/%y')
except (TypeError, ValueError):
newdate = None
return newdate
def convert_date_to_string(date):
date_string = None
if date:
date_string = date.strftime('%m/%d/%Y')
return date_string
def reverse_credit_amount(row):
amount = float(row['amount'][1:].replace(',', ''))
return amount if row['isDebit'] else -amount
def get_email_code(imap_account, imap_password, imap_server, imap_folder, debug=False, delete=True):
if debug:
warnings.warn(
"debug param to get_email_code() is deprecated and will be "
"removed soon; use: logging.getLogger('mintapi')"
".setLevel(logging.DEBUG) to show DEBUG log messages.",
DeprecationWarning)
code = None
imap_client = imaplib.IMAP4_SSL(imap_server)
try:
rv, data = imap_client.login(imap_account, imap_password)
except imaplib.IMAP4.error:
logger.error("ERROR: email login failed")
return ''
code = ''
for c in range(20):
time.sleep(10)
rv, data = imap_client.select(imap_folder)
if rv != 'OK':
logger.error("ERROR: Unable to open mailbox ", rv)
return ''
rv, data = imap_client.search(None, "ALL")
if rv != 'OK':
logger.error("ERROR: Email search failed")
return ''
count = 0
for num in data[0].split()[::-1]:
count = count + 1
if count > 3:
break
rv, data = imap_client.fetch(num, '(RFC822)')
if rv != 'OK':
logger.error("ERROR: ERROR getting message", num)
sys.exit(1)
msg = email.message_from_bytes(data[0][1])
x = email.header.make_header(email.header.decode_header(msg['Subject']))
subject = str(x)
logger.debug("DEBUG: SUBJECT:", subject)
x = email.header.make_header(email.header.decode_header(msg['From']))
frm = str(x)
logger.debug("DEBUG: FROM:", frm)
if not re.search('[email protected]', frm, re.IGNORECASE):
continue
if not re.search('Your Mint Account', subject, re.IGNORECASE):
continue
date_tuple = email.utils.parsedate_tz(msg['Date'])
if date_tuple:
local_date = datetime.fromtimestamp(email.utils.mktime_tz(date_tuple))
else:
logger.error("ERROR: FAIL0")
diff = datetime.now() - local_date
logger.debug("DEBUG: AGE:", diff.seconds)
if diff.seconds > 180:
continue
logger.debug("DEBUG: EMAIL HEADER OK")
body = str(msg)
p = re.search(r'Verification code:<.*?(\d\d\d\d\d\d)$', body,
re.S | re.M)
if p:
code = p.group(1)
else:
logger.error("FAIL1")
logger.debug("DEBUG: CODE FROM EMAIL:", code)
if code != '':
break
logger.debug("DEBUG: CODE FROM EMAIL 2:", code)
if code != '':
logger.debug("DEBUG: CODE FROM EMAIL 3:", code)
if delete and count > 0:
imap_client.store(num, '+FLAGS', '\\Deleted')
if delete:
imap_client.expunge()
break
imap_client.logout()
return code
CHROME_DRIVER_BASE_URL = 'https://chromedriver.storage.googleapis.com/'
CHROME_DRIVER_DOWNLOAD_PATH = '{version}/chromedriver_{arch}.zip'
CHROME_DRIVER_LATEST_RELEASE = 'LATEST_RELEASE'
CHROME_ZIP_TYPES = {
'linux': 'linux64',
'linux2': 'linux64',
'darwin': 'mac64',
'win32': 'win32',
'win64': 'win32'
}
version_pattern = re.compile(
"(?P<version>(?P<major>\\d+)\\.(?P<minor>\\d+)\\."
"(?P<build>\\d+)\\.(?P<patch>\\d+))")
def get_chrome_driver_url(version, arch):
return CHROME_DRIVER_BASE_URL + CHROME_DRIVER_DOWNLOAD_PATH.format(
version=version, arch=CHROME_ZIP_TYPES.get(arch))
def get_chrome_driver_major_version_from_executable(local_executable_path):
# Note; --version works on windows as well.
# check_output fails if running from a thread without a console on win10.
# To protect against this use explicit pipes for STDIN/STDERR.
# See: https://github.com/pyinstaller/pyinstaller/issues/3392
with open(os.devnull, 'wb') as devnull:
version = subprocess.check_output(
[local_executable_path, '--version'],
stderr=devnull,
stdin=devnull)
version_match = version_pattern.search(version.decode())
if not version_match:
return None
return version_match.groupdict()['major']
def get_latest_chrome_driver_version():
"""Returns the version of the latest stable chromedriver release."""
latest_url = CHROME_DRIVER_BASE_URL + CHROME_DRIVER_LATEST_RELEASE
latest_request = requests.get(latest_url)
if latest_request.status_code != 200:
raise RuntimeError(
'Error finding the latest chromedriver at {}, status = {}'.format(
latest_url, latest_request.status_code))
return latest_request.text
def get_stable_chrome_driver(download_directory=os.getcwd()):
chromedriver_name = 'chromedriver'
if _platform in ['win32', 'win64']:
chromedriver_name += '.exe'
local_executable_path = os.path.join(download_directory, chromedriver_name)
latest_chrome_driver_version = get_latest_chrome_driver_version()
version_match = version_pattern.match(latest_chrome_driver_version)
latest_major_version = None
if not version_match:
logger.error("Cannot parse latest chrome driver string: {}".format(
latest_chrome_driver_version))
else:
latest_major_version = version_match.groupdict()['major']
if os.path.exists(local_executable_path):
major_version = get_chrome_driver_major_version_from_executable(
local_executable_path)
if major_version == latest_major_version or not latest_major_version:
# Use the existing chrome driver, as it's already the latest
# version or the latest version cannot be determined at the moment.
return local_executable_path
logger.info('Removing old version {} of Chromedriver'.format(
major_version))
os.remove(local_executable_path)
if not latest_chrome_driver_version:
logger.critical(
'No local chrome driver found and cannot parse the latest chrome '
'driver on the internet. Please double check your internet '
'connection, then ask for assistance on the github project.')
return None
logger.info('Downloading version {} of Chromedriver'.format(
latest_chrome_driver_version))
zip_file_url = get_chrome_driver_url(
latest_chrome_driver_version, _platform)
request = requests.get(zip_file_url)
if request.status_code != 200:
raise RuntimeError(
'Error finding chromedriver at {}, status = {}'.format(
zip_file_url, request.status_code))
zip_file = zipfile.ZipFile(io.BytesIO(request.content))
zip_file.extractall(path=download_directory)
os.chmod(local_executable_path, 0o755)
return local_executable_path
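# Hedged usage sketch (never called here): fetching a chromedriver into a scratch
# directory. The temporary-directory location is an assumption; normal callers let
# _create_web_driver_at_mint_com pass chromedriver_download_path through instead.
def _example_fetch_chromedriver():
    import tempfile  # local import; not otherwise needed by this module
    download_dir = tempfile.mkdtemp(prefix='mintapi-chromedriver-')
    # Returns None when no local binary exists and the latest version cannot be
    # determined; otherwise the path to an executable inside download_dir.
    return get_stable_chrome_driver(download_directory=download_dir)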
def _create_web_driver_at_mint_com(headless=False, session_path=None, use_chromedriver_on_path=False, chromedriver_download_path=os.getcwd()):
"""
Handles starting a web driver at mint.com
"""
chrome_options = ChromeOptions()
if headless:
chrome_options.add_argument('headless')
chrome_options.add_argument('no-sandbox')
chrome_options.add_argument('disable-dev-shm-usage')
chrome_options.add_argument('disable-gpu')
# chrome_options.add_argument("--window-size=1920x1080")
if session_path is not None:
chrome_options.add_argument("user-data-dir=%s" % session_path)
if use_chromedriver_on_path:
driver = Chrome(options=chrome_options)
else:
driver = Chrome(
options=chrome_options,
executable_path=get_stable_chrome_driver(
chromedriver_download_path))
driver.get("https://www.mint.com")
driver.implicitly_wait(20) # seconds
return driver
def _sign_in(email, password, driver, mfa_method=None, mfa_token=None,
mfa_input_callback=None, intuit_account=None, wait_for_sync=True,
wait_for_sync_timeout=5 * 60,
imap_account=None, imap_password=None,
imap_server=None, imap_folder="INBOX",
):
"""
Takes in a web driver and gets it through the Mint sign in process
"""
try:
element = driver.find_element_by_link_text("Sign in")
except NoSuchElementException:
# when user has cookies, a slightly different front page appears
driver.implicitly_wait(0) # seconds
element = driver.find_element_by_link_text("Sign in")
driver.implicitly_wait(20) # seconds
element.click()
time.sleep(1)
try: # try to enter in credentials if username and password are on same page
email_input = driver.find_element_by_id("ius-userid")
if not email_input.is_displayed():
raise ElementNotVisibleException()
email_input.clear() # clear any prefilled email and use the specified email
email_input.send_keys(email)
driver.find_element_by_id("ius-password").send_keys(password)
driver.find_element_by_id("ius-sign-in-submit-btn").submit()
# try to enter in credentials if username and password are on different pages
except (ElementNotInteractableException, ElementNotVisibleException):
driver.implicitly_wait(0)
try:
email_input = driver.find_element_by_id("ius-identifier")
if not email_input.is_displayed():
raise ElementNotVisibleException()
email_input.clear() # clear email and use specified email
email_input.send_keys(email)
driver.find_element_by_id("ius-sign-in-submit-btn").click()
# click on username if on the saved usernames page
except (ElementNotInteractableException, ElementNotVisibleException):
username_elements = driver.find_elements_by_class_name('ius-option-username')
for username_element in username_elements:
if username_element.text == email:
username_element.click()
break
driver.implicitly_wait(5)
try:
driver.find_element_by_id(
"ius-sign-in-mfa-password-collection-current-password").send_keys(password)
driver.find_element_by_id(
"ius-sign-in-mfa-password-collection-continue-btn").submit()
except NoSuchElementException:
pass # password may not be here when using MFA
# Wait until logged in, just in case we need to deal with MFA.
while not driver.current_url.startswith(
'https://mint.intuit.com/overview.event'):
# An implicitly_wait is also necessary here to avoid getting stuck on
# find_element_by_id while the page is still in transition.
driver.implicitly_wait(1)
time.sleep(1)
# bypass "Let's add your current mobile number" interstitial page
try:
skip_for_now = driver.find_element_by_id('ius-verified-user-update-btn-skip')
skip_for_now.click()
except (NoSuchElementException, StaleElementReferenceException, ElementNotVisibleException):
pass
# mfa screen
try:
if mfa_method == 'soft-token':
import oathtool
mfa_token_input = driver.find_element_by_id('ius-mfa-soft-token')
mfa_code = oathtool.generate_otp(mfa_token)
mfa_token_input.send_keys(mfa_code)
mfa_token_submit = driver.find_element_by_id('ius-mfa-soft-token-submit-btn')
mfa_token_submit.click()
else:
try:
driver.find_element_by_id('ius-mfa-options-form')
mfa_method_option = driver.find_element_by_id(
'ius-mfa-option-{}'.format(mfa_method))
mfa_method_option.click()
mfa_method_submit = driver.find_element_by_id(
"ius-mfa-options-submit-btn")
mfa_method_submit.click()
except NoSuchElementException:
pass # no option to select mfa option
if mfa_method == 'email' and imap_account:
for element_id in ["ius-mfa-email-otp-card-challenge", "ius-sublabel-mfa-email-otp"]:
try:
mfa_email_select = driver.find_element_by_id(element_id)
mfa_email_select.click()
break
except (NoSuchElementException, ElementNotInteractableException):
pass # no option to select email address
if mfa_method == 'sms':
try:
mfa_sms_select = driver.find_element_by_id("ius-mfa-sms-otp-card-challenge")
mfa_sms_select.click()
except (NoSuchElementException, ElementNotInteractableException):
pass # no option to select sms
try:
mfa_code_input = driver.find_element_by_id("ius-mfa-confirm-code")
mfa_code_input.clear()
if mfa_method == 'email' and imap_account:
mfa_code = get_email_code(imap_account, imap_password, imap_server, imap_folder=imap_folder)
else:
mfa_code = (mfa_input_callback or input)("Please enter your 6-digit MFA code: ")
mfa_code_input.send_keys(mfa_code)
mfa_code_submit = driver.find_element_by_id("ius-mfa-otp-submit-btn")
mfa_code_submit.click()
except (NoSuchElementException, ElementNotInteractableException):
pass # we're not on mfa input screen
except NoSuchElementException:
pass # not on mfa screen
# account selection screen -- if there are multiple accounts, select one
try:
select_account = driver.find_element_by_id("ius-mfa-select-account-section")
if intuit_account is not None:
account_input = select_account.find_element_by_xpath(
"//label/span[text()='{}']/../preceding-sibling::input".format(intuit_account))
account_input.click()
driver.find_element_by_id("ius-sign-in-mfa-select-account-continue-btn").submit()
except NoSuchElementException:
pass # not on account selection screen
# password only sometimes after mfa
try:
driver.find_element_by_id("ius-sign-in-mfa-password-collection-current-password").send_keys(password)
driver.find_element_by_id("ius-sign-in-mfa-password-collection-continue-btn").submit()
except (NoSuchElementException, ElementNotInteractableException):
pass # not on secondary mfa password screen
finally:
driver.implicitly_wait(20) # seconds
def get_web_driver(email, password, headless=False, mfa_method=None, mfa_token=None,
mfa_input_callback=None, intuit_account=None, wait_for_sync=True,
wait_for_sync_timeout=5 * 60,
session_path=None, imap_account=None, imap_password=None,
imap_server=None, imap_folder="INBOX",
use_chromedriver_on_path=False,
chromedriver_download_path=os.getcwd()):
if headless and mfa_method is None:
logger.warning("Using headless mode without specifying an MFA method "
"is unlikely to lead to a successful login. Defaulting "
"--mfa-method=sms")
mfa_method = "sms"
driver = _create_web_driver_at_mint_com(
headless, session_path, use_chromedriver_on_path, chromedriver_download_path)
status_message = None
try:
_sign_in(email, password, driver, mfa_method, mfa_token, mfa_input_callback, intuit_account, wait_for_sync, wait_for_sync_timeout, imap_account,
imap_password, imap_server, imap_folder)
# Wait until the overview page has actually loaded, and if wait_for_sync==True, sync has completed.
if wait_for_sync:
try:
# Status message might not be present straight away. Seems to be due
# to dynamic content (client side rendering).
status_message = WebDriverWait(driver, 30).until(
expected_conditions.visibility_of_element_located(
(By.CSS_SELECTOR, ".SummaryView .message")))
WebDriverWait(driver, wait_for_sync_timeout).until(
lambda x: "Account refresh complete" in status_message.get_attribute('innerHTML')
)
except (TimeoutException, StaleElementReferenceException):
logger.warning("Mint sync apparently incomplete after timeout. "
"Data retrieved may not be current.")
else:
driver.find_element_by_id("transaction")
except Exception as e:
logger.exception(e)
driver.quit()
driver = None
if status_message is not None and isinstance(status_message, WebElement):
status_message = status_message.text
return driver, status_message
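# Illustrative sketch (not called anywhere): driving the login helper directly
# instead of going through the Mint class. The credentials and soft-token seed
# are placeholders, not real values.
def _example_get_web_driver():
    driver, status = get_web_driver(
        'user@example.com', 'hunter2',
        headless=True,
        mfa_method='soft-token',
        mfa_token='BASE32SECRET',  # assumption: an oathtool-compatible seed
        wait_for_sync=False,
    )
    if driver is not None:
        driver.quit()  # always tear the browser down when done
    return status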
IGNORE_FLOAT_REGEX = re.compile(r"[$,%]")
def parse_float(str_number):
try:
return float(IGNORE_FLOAT_REGEX.sub('', str_number))
except ValueError:
return None
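# Tiny illustration of parse_float: currency and percent decorations are stripped
# before conversion, and unparseable strings fall back to None.
def _demo_parse_float():
    assert parse_float('$1,299.00') == 1299.0
    assert parse_float('12.5%') == 12.5
    assert parse_float('n/a') is None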
DATE_FIELDS = [
'addAccountDate',
'closeDate',
'fiLastUpdated',
'lastUpdated',
]
def convert_account_dates_to_datetime(account):
for df in DATE_FIELDS:
if df in account:
# Convert from javascript timestamp to unix timestamp
# http://stackoverflow.com/a/9744811/5026
try:
ts = account[df] / 1e3
except TypeError:
# returned data is not a number, don't parse
continue
account[df + 'InDate'] = datetime.fromtimestamp(ts)
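# Minimal sketch of the conversion above: Mint reports dates as javascript
# millisecond timestamps, so each DATE_FIELDS entry gains a parallel
# '<field>InDate' datetime. The sample account dict is an assumption about the
# payload shape.
def _demo_convert_account_dates():
    account = {'lastUpdated': 1609459200000, 'name': 'Example Checking'}
    convert_account_dates_to_datetime(account)
    # 1609459200000 ms corresponds to 2021-01-01T00:00:00 UTC (local time may differ).
    return account['lastUpdatedInDate']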
MINT_ROOT_URL = 'https://mint.intuit.com'
MINT_ACCOUNTS_URL = 'https://accounts.intuit.com'
MINT_CREDIT_URL = 'https://credit.finance.intuit.com'
JSON_HEADER = {'accept': 'application/json'}
class MintException(Exception):
pass
class Mint(object):
request_id = 42 # magic number? random number?
token = None
driver = None
status_message = None
def __init__(self, email=None, password=None, mfa_method=None, mfa_token=None,
mfa_input_callback=None, intuit_account=None, headless=False, session_path=None,
imap_account=None, imap_password=None, imap_server=None,
imap_folder="INBOX", wait_for_sync=True, wait_for_sync_timeout=5 * 60,
use_chromedriver_on_path=False,
chromedriver_download_path=os.getcwd()):
if email and password:
self.login_and_get_token(email, password,
mfa_method=mfa_method,
mfa_token=mfa_token,
mfa_input_callback=mfa_input_callback,
intuit_account=intuit_account,
headless=headless,
session_path=session_path,
imap_account=imap_account,
imap_password=imap_password,
imap_server=imap_server,
imap_folder=imap_folder,
wait_for_sync=wait_for_sync,
wait_for_sync_timeout=wait_for_sync_timeout,
use_chromedriver_on_path=use_chromedriver_on_path,
chromedriver_download_path=chromedriver_download_path)
@classmethod
def create(cls, email, password, **opts):
return Mint(email, password, **opts)
@classmethod
def get_rnd(cls): # {{{
return (str(int(time.mktime(datetime.now().timetuple()))) + str(
random.randrange(999)).zfill(3))
def _get_api_key_header(self):
key_var = 'window.MintConfig.browserAuthAPIKey'
api_key = self.driver.execute_script('return ' + key_var)
auth = 'Intuit_APIKey intuit_apikey=' + api_key
auth += ', intuit_apikey_version=1.0'
header = {'authorization': auth}
header.update(JSON_HEADER)
return header
def close(self):
"""Logs out and quits the current web driver/selenium session."""
if not self.driver:
return
self.driver.quit()
self.driver = None
def request_and_check(self, url, method='get',
expected_content_type=None, **kwargs):
"""Performs a request, and checks that the status is OK, and that the
content-type matches expectations.
Args:
url: URL to request
method: either 'get' or 'post'
expected_content_type: prefix to match response content-type against
**kwargs: passed to the request method directly.
Raises:
RuntimeError if status_code does not match.
"""
assert method in ['get', 'post']
result = self.driver.request(method, url, **kwargs)
if result.status_code != requests.codes.ok:
raise RuntimeError('Error requesting %r, status = %d' %
(url, result.status_code))
if expected_content_type is not None:
content_type = result.headers.get('content-type', '')
if not re.match(expected_content_type, content_type):
raise RuntimeError(
'Error requesting %r, content type %r does not match %r' %
(url, content_type, expected_content_type))
return result
def get(self, url, **kwargs):
return self.driver.request('GET', url, **kwargs)
def post(self, url, **kwargs):
return self.driver.request('POST', url, **kwargs)
def login_and_get_token(self, email, password, mfa_method=None, mfa_token=None,
mfa_input_callback=None, intuit_account=None, headless=False,
session_path=None, imap_account=None,
imap_password=None,
imap_server=None,
imap_folder=None,
wait_for_sync=True,
wait_for_sync_timeout=5 * 60,
use_chromedriver_on_path=False,
chromedriver_download_path=os.getcwd()):
if self.token and self.driver:
return
self.driver, self.status_message = get_web_driver(
email, password,
mfa_method=mfa_method,
mfa_token=mfa_token,
mfa_input_callback=mfa_input_callback,
intuit_account=intuit_account,
headless=headless,
session_path=session_path,
imap_account=imap_account,
imap_password=imap_password,
imap_server=imap_server,
imap_folder=imap_folder,
wait_for_sync=wait_for_sync,
wait_for_sync_timeout=wait_for_sync_timeout,
use_chromedriver_on_path=use_chromedriver_on_path,
chromedriver_download_path=chromedriver_download_path)
if self.driver is not None: # check if sign in failed
self.token = self.get_token()
def get_token(self):
value_json = self.driver.find_element_by_name(
'javascript-user').get_attribute('value')
return json.loads(value_json)['token']
def get_request_id_str(self):
req_id = self.request_id
self.request_id += 1
return str(req_id)
def get_attention(self):
attention = None
# noinspection PyBroadException
try:
if "complete" in self.status_message:
attention = self.status_message.split(".")[1].strip()
else:
attention = self.status_message
except Exception:
pass
return attention
def get_bills(self):
return self.get(
'{}/bps/v2/payer/bills'.format(MINT_ROOT_URL),
headers=self._get_api_key_header()
).json()['bills']
def get_invests_json(self):
body = self.get(
'{}/investment.event'.format(MINT_ROOT_URL),
).text
p = re.search(r'<input name="json-import-node" type="hidden" value="json = ([^"]*);"', body)
if p:
return p.group(1).replace('&quot;', '"')
else:
logger.error("FAIL2")
def get_accounts(self, get_detail=False): # {{{
# Issue service request.
req_id = self.get_request_id_str()
input = {
'args': {
'types': [
'BANK',
'CREDIT',
'INVESTMENT',
'LOAN',
'MORTGAGE',
'OTHER_PROPERTY',
'REAL_ESTATE',
'VEHICLE',
'UNCLASSIFIED'
]
},
'id': req_id,
'service': 'MintAccountService',
'task': 'getAccountsSorted'
# 'task': 'getAccountsSortedByBalanceDescending'
}
data = {'input': json.dumps([input])}
account_data_url = (
'{}/bundledServiceController.xevent?legacy=false&token={}'.format(
MINT_ROOT_URL, self.token))
response = self.post(
account_data_url,
data=data,
headers=JSON_HEADER
).text
if req_id not in response:
raise MintException('Could not parse account data: ' + response)
# Parse the request
response = json.loads(response)
accounts = response['response'][req_id]['response']
for account in accounts:
convert_account_dates_to_datetime(account)
if get_detail:
accounts = self.populate_extended_account_detail(accounts)
return accounts
def set_user_property(self, name, value):
url = (
'{}/bundledServiceController.xevent?legacy=false&token={}'.format(
MINT_ROOT_URL, self.token))
req_id = self.get_request_id_str()
result = self.post(
url,
data={'input': json.dumps([{'args': {'propertyName': name,
'propertyValue': value},
'service': 'MintUserService',
'task': 'setUserProperty',
'id': req_id}])},
headers=JSON_HEADER)
if result.status_code != 200:
raise MintException('Received HTTP error %d' % result.status_code)
response = result.text
if req_id not in response:
raise MintException(
'Could not parse response to set_user_property')
def get_transactions_json(self, include_investment=False,
skip_duplicates=False,
start_date=None, end_date=None, id=0):
"""Returns the raw JSON transaction data as downloaded from Mint. The JSON
transaction data includes some additional information missing from the
CSV data, such as whether the transaction is pending or completed, but
leaves off the year for current year transactions.
Warning: In order to reliably include or exclude duplicates, it is
necessary to change the user account property 'hide_duplicates' to the
appropriate value. This affects what is displayed in the web
interface. Note that the CSV transactions never exclude duplicates.
"""
# Warning: This is a global property for the user that we are changing.
self.set_user_property(
'hide_duplicates', 'T' if skip_duplicates else 'F')
# Converts the start date into datetime format - input must be mm/dd/yy
start_date = convert_mmddyy_to_datetime(start_date)
# Converts the end date into datetime format - input must be mm/dd/yy
end_date = convert_mmddyy_to_datetime(end_date)
all_txns = []
offset = 0
# Mint only returns some of the transactions at once. To get all of
# them, we have to keep asking for more until we reach the end.
while 1:
url = MINT_ROOT_URL + '/getJsonData.xevent'
params = {
'queryNew': '',
'offset': offset,
'comparableType': '8',
'startDate': convert_date_to_string(start_date),
'endDate': convert_date_to_string(end_date),
'rnd': Mint.get_rnd(),
}
# Specifying accountId=0 causes Mint to return investment
# transactions as well. Otherwise they are skipped by
# default.
if id > 0 or include_investment:
params['accountId'] = id
if include_investment:
params['task'] = 'transactions'
else:
params['task'] = 'transactions,txnfilters'
params['filterType'] = 'cash'
result = self.request_and_check(
url, headers=JSON_HEADER, params=params,
expected_content_type='text/json|application/json')
data = json.loads(result.text)
txns = data['set'][0].get('data', [])
if not txns:
break
all_txns.extend(txns)
offset += len(txns)
return all_txns
def get_detailed_transactions(self, include_investment=False,
skip_duplicates=False,
remove_pending=True,
start_date=None,
end_date=None):
"""Returns the JSON transaction data as a DataFrame, and converts
current year dates and prior year dates into consistent datetime
format, and reverses credit activity.
Note: start_date and end_date must be in format mm/dd/yy.
If pulls take too long, consider a narrower range of start and end
date. See json explanations of include_investment and skip_duplicates.
Also note: Mint includes pending transactions, however these sometimes
change dates/amounts after the transactions post. They have been
removed by default in this pull, but can be included by changing
remove_pending to False
"""
assert_pd()
result = self.get_transactions_json(include_investment,
skip_duplicates,
start_date, end_date)
df = pd.DataFrame(result)
df['odate'] = df['odate'].apply(json_date_to_datetime)
if remove_pending:
df = df[~df.isPending]
df.reset_index(drop=True, inplace=True)
df.amount = df.apply(reverse_credit_amount, axis=1)
return df
def get_transactions_csv(self, include_investment=False, acct=0):
"""Returns the raw CSV transaction data as downloaded from Mint.
If include_investment == True, also includes transactions that Mint
classifies as investment-related. You may find that the investment
transaction data is not sufficiently detailed to actually be useful,
however.
"""
# Specifying accountId=0 causes Mint to return investment
# transactions as well. Otherwise they are skipped by
# default.
params = None
if include_investment or acct > 0:
params = {'accountId': acct}
result = self.request_and_check(
'{}/transactionDownload.event'.format(MINT_ROOT_URL),
params=params,
expected_content_type='text/csv')
return result.content
def get_net_worth(self, account_data=None):
if account_data is None:
account_data = self.get_accounts()
# account types in this list will be subtracted
invert = set(['loan', 'loans', 'credit'])
return sum([
-a['currentBalance']
if a['accountType'] in invert else a['currentBalance']
for a in account_data if a['isActive']
])
def get_transactions(self, include_investment=False):
"""Returns the transaction data as a Pandas DataFrame."""
assert_pd()
s = StringIO(self.get_transactions_csv(
include_investment=include_investment))
s.seek(0)
df = pd.read_csv(s, parse_dates=['Date'])
df.columns = [c.lower().replace(' ', '_') for c in df.columns]
df.category = (df.category.str.lower()
.replace('uncategorized', pd.NA))
return df
def populate_extended_account_detail(self, accounts): # {{{
# I can't find any way to retrieve this information other than by
# doing this stupid one-call-per-account to listTransactions.xevent
# and parsing the HTML snippet :(
for account in accounts:
headers = dict(JSON_HEADER)
headers['Referer'] = '{}/transaction.event?accountId={}'.format(
MINT_ROOT_URL, account['id'])
list_txn_url = '{}/listTransaction.xevent'.format(MINT_ROOT_URL)
params = {
'accountId': str(account['id']),
'queryNew': '',
'offset': 0,
'comparableType': 8,
'acctChanged': 'T',
'rnd': Mint.get_rnd(),
}
response = json.loads(self.get(
list_txn_url, params=params, headers=headers).text)
xml = '<div>' + response['accountHeader'] + '</div>'
xml = xml.replace('&#8211;', '-')
xml = xmltodict.parse(xml)
account['availableMoney'] = None
account['totalFees'] = None
account['totalCredit'] = None
account['nextPaymentAmount'] = None
account['nextPaymentDate'] = None
xml = xml['div']['div'][1]['table']
if 'tbody' not in xml:
continue
xml = xml['tbody']
table_type = xml['@id']
xml = xml['tr'][1]['td']
if table_type == 'account-table-bank':
account['availableMoney'] = parse_float(xml[1]['#text'])
account['totalFees'] = parse_float(xml[3]['a']['#text'])
if (account['interestRate'] is None):
account['interestRate'] = (
parse_float(xml[2]['#text']) / 100.0
)
elif table_type == 'account-table-credit':
account['availableMoney'] = parse_float(xml[1]['#text'])
account['totalCredit'] = parse_float(xml[2]['#text'])
account['totalFees'] = parse_float(xml[4]['a']['#text'])
if account['interestRate'] is None:
account['interestRate'] = (
parse_float(xml[3]['#text']) / 100.0
)
elif table_type == 'account-table-loan':
account['nextPaymentAmount'] = (
parse_float(xml[1]['#text'])
)
account['nextPaymentDate'] = xml[2].get('#text', None)
elif table_type == 'account-type-investment':
account['totalFees'] = parse_float(xml[2]['a']['#text'])
return accounts
def get_categories(self): # {{{
# Get category metadata.
req_id = self.get_request_id_str()
data = {
'input': json.dumps([{
'args': {
'excludedCategories': [],
'sortByPrecedence': False,
'categoryTypeFilter': 'FREE'
},
'id': req_id,
'service': 'MintCategoryService',
'task': 'getCategoryTreeDto2'
}])
}
cat_url = (
'{}/bundledServiceController.xevent?legacy=false&token={}'.format(
MINT_ROOT_URL, self.token))
response = self.post(cat_url, data=data, headers=JSON_HEADER).text
if req_id not in response:
raise MintException(
'Could not parse category data: "{}"'.format(response))
response = json.loads(response)
response = response['response'][req_id]['response']
# Build category list
categories = {}
for category in response['allCategories']:
categories[category['id']] = category
return categories
def get_budgets(self, hist=None): # {{{
# Get categories
categories = self.get_categories()
# Issue request for budget utilization
first_of_this_month = date.today().replace(day=1)
eleven_months_ago = (first_of_this_month - timedelta(days=330)).replace(day=1)
url = "{}/getBudget.xevent".format(MINT_ROOT_URL)
params = {
'startDate': convert_date_to_string(eleven_months_ago),
'endDate': convert_date_to_string(first_of_this_month),
'rnd': Mint.get_rnd(),
}
response = json.loads(self.get(url, params=params, headers=JSON_HEADER).text)
if hist is not None: # version proofing api
def mos_to_yrmo(mos_frm_zero):
return datetime(year=int(mos_frm_zero / 12),
month=mos_frm_zero % 12 + 1,
day=1).strftime("%Y%m")
# Error checking 'hist' argument
if isinstance(hist, str) or hist > 12:
hist = 12 # MINT_ROOT_URL only calls last 12 months of budget data
elif hist < 1:
hist = 1
bgt_cur_mo = max(map(int, response['data']['income'].keys()))
min_mo_hist = bgt_cur_mo - hist
# Initialize and populate dictionary for return
# Output 'budgets' dictionary with structure
# { "YYYYMM": {"spending": [{"key": value, ...}, ...],
# "income": [{"key": value, ...}, ...] } }
budgets = {}
for months in range(bgt_cur_mo, min_mo_hist, -1):
budgets[mos_to_yrmo(months)] = {}
budgets[mos_to_yrmo(months)][
"income"] = response["data"]["income"][str(months)]['bu']
budgets[mos_to_yrmo(months)][
"spending"] = response["data"]["spending"][str(months)]['bu']
# Fill in the return structure
for month in budgets.keys():
for direction in budgets[month]:
for budget in budgets[month][direction]:
category = self.get_category_object_from_id(budget['cat'], categories)
budget['cat'] = category['name']
budget['parent'] = category['parent']['name']
else:
# Make the skeleton return structure
budgets = {
'income': response['data']['income'][
str(max(map(int, response['data']['income'].keys())))
]['bu'],
'spend': response['data']['spending'][
str(max(map(int, response['data']['spending'].keys())))
]['bu']
}
# Fill in the return structure
for direction in budgets.keys():
for budget in budgets[direction]:
category = self.get_category_object_from_id(budget['cat'], categories)
budget['cat'] = category['name']
# Uncategorized budget's parent is a string: 'Uncategorized'
if isinstance(category['parent'], dict):
budget['parent'] = category['parent']['name']
else:
budget['parent'] = category['parent']
return budgets
def get_category_from_id(self, cid, categories):
category = self.get_category_object_from_id(cid, categories)
return category['name']
def get_category_object_from_id(self, cid, categories):
if cid == 0:
return {'parent': 'Uncategorized', 'name': 'Uncategorized'}
for i in categories:
if categories[i]['id'] == cid:
return categories[i]
if 'children' in categories[i]:
for j in categories[i]['children']:
if categories[i][j]['id'] == cid:
return categories[i][j]
return {'parent': 'Unknown', 'name': 'Unknown'}
def initiate_account_refresh(self):
self.post(
'{}/refreshFILogins.xevent'.format(MINT_ROOT_URL),
data={'token': self.token},
headers=JSON_HEADER)
def get_credit_score(self):
# Request a single credit report, and extract the score
report = self.get_credit_report(limit=1, details=False)
try:
vendor = report['reports']['vendorReports'][0]
return vendor['creditReportList'][0]['creditScore']
except (KeyError, IndexError):
raise Exception('No Credit Score Found')
def get_credit_report(self, limit=2, details=True):
# Get the browser API key, build auth header
credit_header = self._get_api_key_header()
# Get credit reports. The UI shows 2 by default, but more are available!
# At least 8, but could be all the TransUnion reports Mint has
# How the "bands" are defined, and other metadata, is available at a
# /v1/creditscoreproviders/3 endpoint (3 = TransUnion)
credit_report = dict()
response = self.get(
'{}/v1/creditreports?limit={}'.format(MINT_CREDIT_URL, limit),
headers=credit_header)
credit_report['reports'] = response.json()
# If we want details, request the detailed sub-reports
if details:
# Get full list of credit inquiries
response = self.get(
'{}/v1/creditreports/0/inquiries'.format(MINT_CREDIT_URL),
headers=credit_header)
credit_report['inquiries'] = response.json()
# Get full list of credit accounts
response = self.get(
'{}/v1/creditreports/0/tradelines'.format(MINT_CREDIT_URL),
headers=credit_header)
credit_report['accounts'] = response.json()
# Get credit utilization history (~3 months, by account)
response = self.get(
'{}/v1/creditreports/creditutilizationhistory'.format(MINT_CREDIT_URL),
headers=credit_header)
clean_data = self.process_utilization(response.json())
credit_report['utilization'] = clean_data
return credit_report
def process_utilization(self, data):
# Function to clean up the credit utilization history data
utilization = []
utilization.extend(self.flatten_utilization(data['cumulative']))
for trade in data['tradelines']:
utilization.extend(self.flatten_utilization(trade))
return utilization
def flatten_utilization(self, data):
# The utilization history data has a nested format, grouped by year
# and then by month. Let's flatten that into a list of dates.
utilization = []
name = data.get('creditorName', 'Total')
for cu in data['creditUtilization']:
year = cu['year']
for cu_month in cu['months']:
date = datetime.strptime(cu_month['name'], '%B').replace(
day=1, year=int(year))
utilization.append({
'name': name,
'date': date.strftime('%Y-%m-%d'),
'utilization': cu_month['creditUtilization']
})
return utilization
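# Hedged end-to-end sketch of the Mint class above (never called here). The
# credentials and soft-token seed are placeholders; start_date/end_date follow
# the mm/dd/yy convention documented in get_detailed_transactions.
def _example_mint_session():
    mint = Mint.create('user@example.com', 'hunter2', headless=True,
                       mfa_method='soft-token', mfa_token='BASE32SECRET')
    try:
        accounts = mint.get_accounts(get_detail=False)
        txns = mint.get_detailed_transactions(start_date='01/01/21',
                                              end_date='12/31/21')
        return accounts, txns
    finally:
        mint.close()  # quit the underlying selenium session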
def get_accounts(email, password, get_detail=False):
mint = Mint.create(email, password)
return mint.get_accounts(get_detail=get_detail)
def get_net_worth(email, password):
mint = Mint.create(email, password)
account_data = mint.get_accounts()
return mint.get_net_worth(account_data)
def make_accounts_presentable(accounts, presentable_format='EXCEL'):
formatter = {
'DATE': '%Y-%m-%d',
'ISO8601': '%Y-%m-%dT%H:%M:%SZ',
'EXCEL': '%Y-%m-%d %H:%M:%S',
}[presentable_format]
for account in accounts:
for k, v in account.items():
if isinstance(v, datetime):
account[k] = v.strftime(formatter)
return accounts
def print_accounts(accounts):
print(json.dumps(make_accounts_presentable(accounts), indent=2))
def get_budgets(email, password):
mint = Mint.create(email, password)
return mint.get_budgets()
def get_credit_score(email, password):
mint = Mint.create(email, password)
return mint.get_credit_score()
def get_credit_report(email, password):
mint = Mint.create(email, password)
return mint.get_credit_report()
def initiate_account_refresh(email, password):
mint = Mint.create(email, password)
return mint.initiate_account_refresh()
def main():
import getpass
import argparse
ARGUMENTS = [
(('email', ), {'nargs': '?', 'default': None, 'help': 'The e-mail address for your Mint.com account'}),
(('password', ), {'nargs': '?', 'default': None, 'help': 'The password for your Mint.com account'}),
(('--accounts', ), {'action': 'store_true', 'dest': 'accounts', 'default': False, 'help': 'Retrieve account information (default if nothing else is specified)'}),
(('--attention', ), {'action': 'store_true', 'help': 'Display accounts that need attention (None if none).'}),
(('--budgets', ), {'action': 'store_true', 'dest': 'budgets', 'default': False, 'help': 'Retrieve budget information'}),
(('--budget_hist', ), {'action': 'store_true', 'dest': 'budget_hist', 'default': None, 'help': 'Retrieve 12-month budget history information'}),
(('--chromedriver-download-path', ), {'default': os.getcwd(), 'help': 'The directory to download chromedriver to.'}),
(('--credit-report', ), {'action': 'store_true', 'dest': 'credit_report', 'default': False, 'help': 'Retrieve full credit report'}),
(('--credit-score', ), {'action': 'store_true', 'dest': 'credit_score', 'default': False, 'help': 'Retrieve current credit score'}),
(('--end-date', ), {'nargs': '?', 'default': None, 'help': 'Latest date for transactions to be retrieved from. Used with --extended-transactions. Format: mm/dd/yy'}),
(('--extended-accounts', ), {'action': 'store_true', 'dest': 'accounts_ext', 'default': False, 'help': 'Retrieve extended account information (slower, implies --accounts)'}),
(('--extended-transactions', ), {'action': 'store_true', 'default': False, 'help': 'Retrieve transactions with extra information and arguments'}),
(('--filename', '-f'), {'help': 'write results to file. can be {csv,json} format. default is to write to stdout.'}),
(('--headless', ), {'action': 'store_true', 'help': 'Whether to execute chromedriver with no visible window.'}),
(('--imap-account', ), {'default': None, 'help': 'IMAP login account'}),
(('--imap-folder', ), {'default': 'INBOX', 'help': 'IMAP folder'}),
(('--imap-password', ), {'default': None, 'help': 'IMAP login password'}),
(('--imap-server', ), {'default': None, 'help': 'IMAP server'}),
(('--imap-test', ), {'action': 'store_true', 'help': 'Test imap login and retrieval.'}),
(('--include-investment', ), {'action': 'store_true', 'default': False, 'help': 'Used with --extended-transactions'}),
(('--keyring', ), {'action': 'store_true', 'help': 'Use OS keyring for storing password information'}),
(('--mfa-method', ), {'choices': ['sms', 'email', 'soft-token'], 'default': 'sms', 'help': 'The MFA method to automate.'}),
(('--mfa-token', ), {'default': None, 'help': 'The MFA soft-token to pass to oathtool.'}),
(('--net-worth', ), {'action': 'store_true', 'dest': 'net_worth', 'default': False, 'help': 'Retrieve net worth information'}),
(('--no_wait_for_sync', ), {'action': 'store_true', 'default': False, 'help': 'By default, mint api will wait for accounts to sync with the backing financial institutions. If this flag is present, do not wait for them to sync.'}),
(('--session-path', ), {'nargs': '?', 'default': os.path.join(os.path.expanduser("~"), '.mintapi', 'session'), 'help': 'Directory to save browser session, including cookies. Used to prevent repeated MFA prompts. Defaults to $HOME/.mintapi/session. Set to None to use a temporary profile.'}),
# Displayed to the user as a positive switch, but processed back here as a negative
(('--show-pending', ), {'action': 'store_false', 'default': True, 'help': 'Exclude pending transactions from being retrieved. Used with --extended-transactions'}),
(('--skip-duplicates', ), {'action': 'store_true', 'default': False, 'help': 'Used with --extended-transactions'}),
(('--start-date', ), {'nargs': '?', 'default': None, 'help': 'Earliest date for transactions to be retrieved from. Used with --extended-transactions. Format: mm/dd/yy'}),
(('--transactions', '-t'), {'action': 'store_true', 'default': False, 'help': 'Retrieve transactions'}),
(('--use-chromedriver-on-path', ), {'action': 'store_true', 'help': 'Whether to use the chromedriver on PATH, instead of downloading a local copy.'}),
(('--wait_for_sync_timeout', ), {'type': int, 'default': 5 * 60, 'help': 'Number of seconds to wait for sync. Default is 5 minutes'}),
]
try:
import keyring
except ImportError:
keyring = None
# Parse command-line arguments {{{
cmdline = argparse.ArgumentParser()
for argument_commands, argument_options in ARGUMENTS:
cmdline.add_argument(*argument_commands, **argument_options)
options = cmdline.parse_args()
if options.keyring and not keyring:
cmdline.error('--keyring can only be used if the `keyring` '
'library is installed.')
try: # python 2.x
from __builtin__ import raw_input as input
except ImportError: # python 3
from builtins import input
except NameError:
pass
# Try to get the e-mail and password from the arguments
email = options.email
password = options.password
if not email:
# If the user did not provide an e-mail, prompt for it
email = input("Mint e-mail: ")
if keyring and not password:
# If the keyring module is installed and we don't yet have
# a password, try prompting for it
password = keyring.get_password('mintapi', email)
if not password:
# If we still don't have a password, prompt for it
password = getpass.getpass("Mint password: ")
if options.keyring:
# If keyring option is specified, save the password in the keyring
keyring.set_password('mintapi', email, password)
if options.accounts_ext:
options.accounts = True
if not any([options.accounts, options.budgets, options.transactions,
options.extended_transactions, options.net_worth, options.credit_score,
options.credit_report, options.attention]):
options.accounts = True
if options.session_path == 'None':
session_path = None
else:
session_path = options.session_path
mint = Mint.create(
email, password,
mfa_method=options.mfa_method,
mfa_token=options.mfa_token,
session_path=session_path,
headless=options.headless,
imap_account=options.imap_account,
imap_password=options.imap_password,
imap_server=options.imap_server,
imap_folder=options.imap_folder,
wait_for_sync=not options.no_wait_for_sync,
wait_for_sync_timeout=options.wait_for_sync_timeout,
use_chromedriver_on_path=options.use_chromedriver_on_path,
chromedriver_download_path=options.chromedriver_download_path
)
atexit.register(mint.close) # Ensure everything is torn down.
if options.imap_test:
mfa_code = get_email_code(
options.imap_account, options.imap_password, options.imap_server,
imap_folder=options.imap_folder, delete=False)
print("MFA CODE:", mfa_code)
sys.exit()
data = None
if options.accounts and options.budgets:
try:
accounts = make_accounts_presentable(
mint.get_accounts(get_detail=options.accounts_ext)
)
except Exception:
accounts = None
try:
budgets = mint.get_budgets()
except Exception:
budgets = None
data = {'accounts': accounts, 'budgets': budgets}
elif options.budgets:
try:
data = mint.get_budgets()
except Exception:
data = None
elif options.budget_hist:
try:
data = mint.get_budgets(hist=12)
except Exception:
data = None
elif options.accounts:
try:
data = make_accounts_presentable(mint.get_accounts(
get_detail=options.accounts_ext)
)
except Exception:
data = None
elif options.transactions:
data = mint.get_transactions(
include_investment=options.include_investment)
elif options.extended_transactions:
data = mint.get_detailed_transactions(
start_date=options.start_date,
end_date=options.end_date,
include_investment=options.include_investment,
remove_pending=options.show_pending,
skip_duplicates=options.skip_duplicates)
elif options.net_worth:
data = mint.get_net_worth()
elif options.credit_score:
data = mint.get_credit_score()
elif options.credit_report:
data = mint.get_credit_report(details=True)
# output the data
if options.transactions or options.extended_transactions:
if options.filename is None:
print(data.to_json(orient='records'))
elif options.filename.endswith('.csv'):
data.to_csv(options.filename, index=False)
elif options.filename.endswith('.json'):
data.to_json(options.filename, orient='records')
else:
raise ValueError('file extension must be either .csv or .json')
else:
if options.filename is None:
print(json.dumps(data, indent=2))
elif options.filename.endswith('.json'):
with open(options.filename, 'w+') as f:
json.dump(data, f, indent=2)
else:
raise ValueError('file type must be json for non-transaction data')
if options.attention:
attention_msg = mint.get_attention()
if attention_msg is None or attention_msg == "":
attention_msg = "no messages"
if options.filename is None:
print(attention_msg)
else:
with open(options.filename, 'w+') as f:
f.write(attention_msg)
if __name__ == '__main__':
main()
|
py | 1a4bdbf30d85a753c40b40dbda6e470e74fc8e3f | NEWS_API_KEY = 'bf2d30bc84e1419abab595739a664dd9'
SECRET_KEY = 'peter' |
py | 1a4bdebd5b0e1cba7767e1dc92858586198921a9 | import json
import logging
import os
import click
import google.auth.transport.grpc
import google.auth.transport.requests
import google.oauth2.credentials
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from assistant import Assistant
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
SCOPE = 'user-read-playback-state user-modify-playback-state'
@click.group()
@click.pass_context
def spottv(ctx):
return
@spottv.command()
@click.pass_obj
def on(settings):
"""
Turn on TV and launch Spotify app
"""
send_text_query('turn on Google TV', settings['device_model_id'], settings['device_id'])
# play_spotify_uri(spotify_uri='')
@spottv.command()
@click.pass_obj
def off(settings):
"""
Turn off TV
"""
send_text_query('turn off TV', settings['device_model_id'], settings['device_id'])
@spottv.command()
@click.argument('playlist_name')
@click.pass_obj
def play(settings, playlist_name):
"""
Play a playlist defined in config.json
Args:
settings: Device info
playlist_name: Name of the playlist
"""
file = open('config.json')
config_data = json.load(file)
spotify_uri = config_data['playlists'][playlist_name]
file.close()
send_text_query('turn on Google TV', settings['device_model_id'], settings['device_id'])
play_spotify_uri(spotify_uri)
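# Illustrative shape of the config.json consumed by the `play` command above;
# the playlist names and URIs are placeholders, not part of this project.
_EXAMPLE_CONFIG = {
    "playlists": {
        "focus": "spotify:playlist:YOUR_PLAYLIST_ID",
        "party": "spotify:playlist:ANOTHER_PLAYLIST_ID",
    }
}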
def play_spotify_uri(spotify_uri):
"""
Start playback of Spotify URI
Args:
spotify_uri (str): URI of Spotify track, album or playlist
"""
spotify_controller = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=SCOPE))
devices = spotify_controller.devices()
chromecast = None
if not devices:
click.echo('No device found')
else:
# click.echo(devices)
for device in devices['devices']:
if device['type'] == 'TV':
chromecast = device
break
if not chromecast:
click.echo('No Chromecast found')
else:
chromecast_id = chromecast['id']
chromecast_name = chromecast['name']
playlist = spotify_controller.playlist(spotify_uri)
playlist_name = playlist['name']
click.echo(f"Starting playback of '{playlist_name}' on {chromecast_name}...")
# spotify_controller.shuffle(True, chromecast_id)
spotify_controller.start_playback(device_id=chromecast_id, context_uri=spotify_uri)
def send_text_query(text_query, device_model_id, device_id):
"""Send a text query to specified device
Args:
text_query (str): text query to send (equivalent of a typed voice command).
device_model_id (str): identifier of the device model.
device_id (str): identifier of the registered device instance.
"""
credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json')
# Setup logging.
# logging.basicConfig(level=logging.DEBUG if True else logging.INFO)
# Load OAuth 2.0 credentials.
try:
with open(credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f))
http_request = google.auth.transport.requests.Request()
credentials.refresh(http_request)
except Exception as e:
logging.error('Error loading credentials: %s', e)
logging.error('Run google-oauthlib-tool to initialize '
'new OAuth 2.0 credentials.')
logging.error('google-oauthlib-tool '
'--client-secrets client_secret_811734406476-tvp38peele577b6dfv7roigsdf727tog.apps'
'.googleusercontent.com.json '
'--scope https://www.googleapis.com/auth/assistant-sdk-prototype '
'--save --headless')
return
# Create an authorized gRPC channel.
grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
credentials,
http_request,
ASSISTANT_API_ENDPOINT
)
logging.info('Connecting to %s', ASSISTANT_API_ENDPOINT)
# Call Assistant
with Assistant('en-US',
device_model_id,
device_id,
grpc_channel,
DEFAULT_GRPC_DEADLINE
) as assistant:
assistant.assist(text_query=text_query)
def get_device_info():
device_info = {}
file = open('device_model.json')
model_data = json.load(file)
device_info['device_model_id'] = model_data['device_model_id']
file.close()
file = open('device_instance.json')
instance_data = json.load(file)
device_info['device_id'] = instance_data['id']
file.close()
return device_info
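# Hedged sketch of the two JSON files read by get_device_info(); the identifiers
# are placeholders produced when registering a device model and instance with the
# Google Assistant SDK, not real values.
_EXAMPLE_DEVICE_MODEL_JSON = {"device_model_id": "my-project-spottv-v1"}
_EXAMPLE_DEVICE_INSTANCE_JSON = {"id": "my-spottv-instance-id"}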
def main():
return spottv(obj=get_device_info())
if __name__ == '__main__':
main()
|
py | 1a4be064e43b61c264458379caf08121e2f87baa | # File: wmi_consts.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Json keys specific to wmi app's input parameters/config and the output result
WMI_JSON_QUERY = "query"
WMI_JSON_TOTAL_SERVICES = "total_services"
WMI_JSON_RUNNING_SERVICES = "running_services"
WMI_JSON_TOTAL_PROCESSES = "total_processes"
WMI_JSON_TOTAL_USERS = "total_users"
WMI_JSON_DISABLED_USERS = "disabled_users"
WMI_JSON_SYSTEM_DETAILS = "system_details"
WMI_JSON_OS_DETAILS = "os_details"
WMI_JSON_BOOT_CONFIG_DETAILS = "boot_config_details"
WMI_JSON_DNSHOSTNAME = "dns_hostname"
WMI_JSON_PHYSICAL_MEM = "memory"
WMI_JSON_WORKGROUP = "workgroup"
WMI_JSON_DOMAIN = "domain"
WMI_JSON_VERSION = "version"
# Status messages for wmi app
WMI_SUCC_QUERY_EXECUTED = "WMI Query executed"
WMI_ERR_QUERY_EXECUTION_FAILED = "WMI query failed."
WMI_ERR_QUERY_EXECUTION_FAILED += "\nPlease make sure remote WMI access is enabled on the target machine."
WMI_ERR_QUERY_EXECUTION_FAILED += "\nAny firewall if present is configured to allow remote WMI communication"
WMI_SUCC_SYS_INFO_QUERIED = "System info queried"
# Progress messages format string
WMI_MSG_CONNECTION_FAILED = "WMI connection to {machine} failed"
# Progress strings constants, define them first and then use them in the call to send_progress
CONN_PY_PROG_SENDING_QUERY = "Executing WMI query"
# Constants relating to '_get_error_message_from_exception'
WMI_ERR_CODE_MSG = "Error code unavailable"
WMI_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
WMI_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
|
py | 1a4be0d81e9eb6321368a1e917644a6838f8fc0c | import logging
import os
import tempfile
from galaxy.tool_shed.galaxy_install.tool_dependencies.env_manager import EnvManager
from galaxy.tool_shed.galaxy_install.tool_dependencies.recipe.env_file_builder import EnvFileBuilder
from galaxy.tool_shed.galaxy_install.tool_dependencies.recipe.install_environment import InstallEnvironment
from galaxy.tool_shed.util import tool_dependency_util
from galaxy.tool_shed.util.basic_util import (
INSTALLATION_LOG,
remove_dir,
)
from galaxy.tool_shed.util.metadata_util import get_updated_changeset_revisions_from_tool_shed
from galaxy.tool_shed.util.repository_util import (
get_absolute_path_to_file_in_repository,
get_repository_for_dependency_relationship,
)
from galaxy.tool_util.deps.resolvers import NullDependency
from galaxy.util import (
listify,
url_get,
)
from galaxy.util.tool_shed.common_util import (
get_tool_shed_url_from_tool_shed_registry,
remove_protocol_from_tool_shed_url,
)
from galaxy.util.tool_shed.xml_util import parse_xml
log = logging.getLogger(__name__)
class RecipeTag:
"""Abstract class that defines a standard format for handling recipe tags when installing packages."""
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
raise Exception("Unimplemented Method")
class SyncDatabase:
def sync_database_with_file_system(
self,
app,
tool_shed_repository,
tool_dependency_name,
tool_dependency_version,
tool_dependency_install_dir,
tool_dependency_type="package",
):
"""
The installation directory defined by the received tool_dependency_install_dir exists, so check for
the presence of INSTALLATION_LOG. If the files exists, we'll assume the tool dependency is installed,
but not necessarily successfully (it could be in an error state on disk. However, we can justifiably
assume here that no matter the state, an associated database record will exist.
"""
# This method should be reached very rarely. It implies that either the Galaxy environment
# became corrupted (i.e., the database records for installed tool dependencies is not synchronized
# with tool dependencies on disk) or the Tool Shed's install and test framework is running. The Tool
# Shed's install and test framework installs repositories in 2 stages, those of type tool_dependency_definition
# followed by those containing valid tools and tool functional test components.
log.debug("Synchronizing the database with the file system...")
try:
log.debug(
"The value of app.config.running_functional_tests is: %s" % str(app.config.running_functional_tests)
)
except Exception:
pass
sa_session = app.install_model.context
can_install_tool_dependency = False
tool_dependency = tool_dependency_util.get_tool_dependency_by_name_version_type_repository(
app, tool_shed_repository, tool_dependency_name, tool_dependency_version, tool_dependency_type
)
if tool_dependency.status == app.install_model.ToolDependency.installation_status.INSTALLING:
# The tool dependency is in an Installing state, so we don't want to do anything to it. If the tool
# dependency is being installed by someone else, we don't want to interfere with that. This assumes
# the installation by "someone else" is not hung in an Installing state, which is a weakness if that
# "someone else" never repaired it.
log.debug(
"Skipping installation of tool dependency %s version %s because it has a status of %s"
% (str(tool_dependency.name), str(tool_dependency.version), str(tool_dependency.status))
)
else:
# We have a pre-existing installation directory on the file system, but our associated database record is
# in a state that allowed us to arrive here. At this point, we'll inspect the installation directory to
# see if we have a "proper installation" and if so, synchronize the database record rather than reinstalling
# the dependency if we're "running_functional_tests". If we're not "running_functional_tests, we'll set
# the tool dependency's installation status to ERROR.
tool_dependency_installation_directory_contents = os.listdir(tool_dependency_install_dir)
if INSTALLATION_LOG in tool_dependency_installation_directory_contents:
# Since this tool dependency's installation directory contains an installation log, we consider it to be
# installed. In some cases the record may be missing from the database due to some activity outside of
# the control of the Tool Shed. Since a new record was created for it and we don't know the state of the
# files on disk, we will set it to an error state (unless we are running Tool Shed functional tests - see
# below).
log.debug(
"Skipping installation of tool dependency %s version %s because it is installed in %s"
% (str(tool_dependency.name), str(tool_dependency.version), str(tool_dependency_install_dir))
)
if app.config.running_functional_tests:
# If we are running functional tests, the state will be set to Installed because previously compiled
# tool dependencies are not deleted by default by the "install and test" framework.
tool_dependency.status = app.install_model.ToolDependency.installation_status.INSTALLED
else:
error_message = "The installation directory for this tool dependency had contents but the database had no record. "
error_message += (
"The installation log may show this tool dependency to be correctly installed, but due to the "
)
error_message += "missing database record it is now being set to Error."
tool_dependency.status = app.install_model.ToolDependency.installation_status.ERROR
tool_dependency.error_message = error_message
else:
error_message = (
"\nInstallation path %s for tool dependency %s version %s exists, but the expected file %s"
% (
str(tool_dependency_install_dir),
str(tool_dependency_name),
str(tool_dependency_version),
str(INSTALLATION_LOG),
)
)
error_message += " is missing. This indicates an installation error so the tool dependency is being"
error_message += " prepared for re-installation."
log.error(error_message)
tool_dependency.status = app.install_model.ToolDependency.installation_status.NEVER_INSTALLED
remove_dir(tool_dependency_install_dir)
can_install_tool_dependency = True
sa_session.add(tool_dependency)
sa_session.flush()
try:
log.debug(
"Returning from sync_database_with_file_system with tool_dependency %s, can_install_tool_dependency %s."
% (str(tool_dependency.name), str(can_install_tool_dependency))
)
except Exception as e:
log.debug(str(e))
return tool_dependency, can_install_tool_dependency
class Install(RecipeTag, SyncDatabase):
def __init__(self, app):
self.app = app
self.tag = "install"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# <install version="1.0">
# Get the installation directory for tool dependencies that will be installed for the received tool_shed_repository.
actions_elem_tuples = []
proceed_with_install = False
install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=tool_shed_repository.name,
repository_owner=tool_shed_repository.owner,
repository_changeset_revision=tool_shed_repository.installed_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
if os.path.exists(install_dir):
# Notice that we'll throw away the following tool_dependency if it can be installed.
tool_dependency, proceed_with_install = self.sync_database_with_file_system(
self.app,
tool_shed_repository,
package_name,
package_version,
install_dir,
tool_dependency_type="package",
)
if not proceed_with_install:
log.debug(
"Tool dependency %s version %s cannot be installed (it was probably previously installed), so returning it."
% (str(tool_dependency.name), str(tool_dependency.version))
)
return tool_dependency, proceed_with_install, actions_elem_tuples
else:
proceed_with_install = True
if proceed_with_install:
package_install_version = package_elem.get("version", "1.0")
status = self.app.install_model.ToolDependency.installation_status.INSTALLING
tool_dependency = tool_dependency_util.create_or_update_tool_dependency(
app=self.app,
tool_shed_repository=tool_shed_repository,
name=package_name,
version=package_version,
type="package",
status=status,
set_status=True,
)
# Get the information about the current platform in case the tool dependency definition includes tag sets
# for installing compiled binaries.
platform_info_dict = tool_dependency_util.get_platform_info_dict()
if package_install_version == "1.0":
# Handle tool dependency installation using a fabric method included in the Galaxy framework.
actions_elem_tuples = tool_dependency_util.parse_package_elem(
package_elem, platform_info_dict=platform_info_dict, include_after_install_actions=True
)
if not actions_elem_tuples:
proceed_with_install = False
error_message = f"Version {str(package_version)} of the {str(package_name)} package cannot be installed because "
error_message += "the recipe for installing the package is missing either an <actions> tag set or an <actions_group> "
error_message += "tag set."
# Since there was an installation error, update the tool dependency status to Error.
# The remove_installation_path option must be left False here.
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
error_message=error_message,
)
else:
raise NotImplementedError(
'Only install version 1.0 is currently supported (i.e., change your tag to be <install version="1.0">).'
)
return tool_dependency, proceed_with_install, actions_elem_tuples
class Package(RecipeTag):
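    """RecipeTag handler for the <package> tag set of a tool dependency definition."""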
def __init__(self, app):
self.app = app
self.tag = "package"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
action_elem_tuples = []
proceed_with_install = False
# Only install the tool_dependency if it is not already installed and it is associated with a database
# record in the received tool_dependencies.
if package_name and package_version:
dependencies_ignored = not self.app.toolbox.dependency_manager.uses_tool_shed_dependencies()
if dependencies_ignored:
log.debug(
"Skipping installation of tool dependency package %s because tool shed dependency resolver not enabled."
% str(package_name)
)
                # Tool dependency resolvers have been configured and they do not include the tool shed. Do not install the package.
dep = self.app.toolbox.dependency_manager.find_dep(package_name, package_version, type="package")
if not isinstance(dep, NullDependency):
# TODO: Do something here such as marking it installed or configured externally.
pass
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
)
else:
proceed_with_install = True
return tool_dependency, proceed_with_install, action_elem_tuples
class ReadMe(RecipeTag):
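    """RecipeTag handler for the <readme> tag set; no installation work is required for it."""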
def __init__(self, app):
self.app = app
self.tag = "readme"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# Nothing to be done.
action_elem_tuples = []
proceed_with_install = False
return tool_dependency, proceed_with_install, action_elem_tuples
class Repository(RecipeTag, SyncDatabase):
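    """RecipeTag handler for the <repository> tag set defining a complex repository dependency."""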
def __init__(self, app):
self.app = app
self.tag = "repository"
def create_temporary_tool_dependencies_config(self, tool_shed_url, name, owner, changeset_revision):
"""Make a call to the tool shed to get the required repository's tool_dependencies.xml file."""
tool_shed_url = get_tool_shed_url_from_tool_shed_registry(self.app, tool_shed_url)
if tool_shed_url is None or name is None or owner is None or changeset_revision is None:
message = (
"Unable to retrieve required tool_dependencies.xml file from the Tool Shed because one or more of the "
)
message += (
"following required parameters is None: tool_shed_url: %s, name: %s, owner: %s, changeset_revision: %s "
% (str(tool_shed_url), str(name), str(owner), str(changeset_revision))
)
raise Exception(message)
params = dict(name=name, owner=owner, changeset_revision=changeset_revision)
pathspec = ["repository", "get_tool_dependencies_config_contents"]
text = url_get(
tool_shed_url, auth=self.app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params
)
if text:
# Write the contents to a temporary file on disk so it can be reloaded and parsed.
fh = tempfile.NamedTemporaryFile("w", prefix="tmp-toolshed-cttdc")
tmp_filename = fh.name
fh.close()
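            # NamedTemporaryFile is used here only to obtain a unique filename; closing it
            # removes the empty file, which is then recreated with the downloaded contents.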
fh = open(tmp_filename, "w")
fh.write(text)
fh.close()
return tmp_filename
else:
message = "Unable to retrieve required tool_dependencies.xml file from the Tool Shed for revision "
message += f"{str(changeset_revision)} of installed repository {str(name)} owned by {str(owner)}."
raise Exception(message)
def create_tool_dependency_with_initialized_env_sh_file(
self,
dependent_install_dir,
tool_shed_repository,
required_repository,
package_name,
package_version,
tool_dependencies_config,
):
"""
Create or get a tool_dependency record that is defined by the received package_name and package_version.
An env.sh file will be created for the tool_dependency in the received dependent_install_dir.
"""
# The received required_repository refers to a tool_shed_repository record that is defined as a complex
# repository dependency for this tool_dependency. The required_repository may or may not be currently
# installed (it doesn't matter). If it is installed, it is associated with a tool_dependency that has
# an env.sh file that this new tool_dependency must be able to locate and "source". If it is not installed,
# we can still determine where that env.sh file will be, so we'll initialize this new tool_dependency's env.sh
# file in either case. If the required repository ends up with an installation error, this new tool
# dependency will still be fine because its containing repository will be defined as missing dependencies.
tool_dependencies = []
if not os.path.exists(dependent_install_dir):
os.makedirs(dependent_install_dir)
required_tool_dependency_env_file_path = None
if tool_dependencies_config:
required_td_tree, error_message = parse_xml(tool_dependencies_config)
if required_td_tree:
required_td_root = required_td_tree.getroot()
for required_td_elem in required_td_root:
# Find the appropriate package name and version.
if required_td_elem.tag == "package":
# <package name="bwa" version="0.5.9">
required_td_package_name = required_td_elem.get("name", None)
required_td_package_version = required_td_elem.get("version", None)
# Check the database to see if we have a record for the required tool dependency (we may not which is ok). If we
# find a record, we need to see if it is in an error state and if so handle it appropriately.
required_tool_dependency = (
tool_dependency_util.get_tool_dependency_by_name_version_type_repository(
self.app,
required_repository,
required_td_package_name,
required_td_package_version,
"package",
)
)
if required_td_package_name == package_name and required_td_package_version == package_version:
# Get or create a database tool_dependency record with which the installed package on disk will be associated.
tool_dependency = tool_dependency_util.create_or_update_tool_dependency(
app=self.app,
tool_shed_repository=tool_shed_repository,
name=package_name,
version=package_version,
type="package",
status=self.app.install_model.ToolDependency.installation_status.NEVER_INSTALLED,
set_status=True,
)
# Create an env.sh file for the tool_dependency whose first line will source the env.sh file located in
# the path defined by required_tool_dependency_env_file_path. It doesn't matter if the required env.sh
                            # file currently exists.
required_tool_dependency_env_file_path = self.get_required_repository_package_env_sh_path(
package_name, package_version, required_repository
)
env_file_builder = EnvFileBuilder(tool_dependency.installation_directory(self.app))
env_file_builder.append_line(action="source", value=required_tool_dependency_env_file_path)
return_code = env_file_builder.return_code
if return_code:
error_message = "Error defining env.sh file for package %s, return_code: %s" % (
str(package_name),
str(return_code),
)
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
error_message=error_message,
)
elif required_tool_dependency is not None and required_tool_dependency.in_error_state:
error_message = (
"This tool dependency's required tool dependency %s version %s has status %s."
% (
str(required_tool_dependency.name),
str(required_tool_dependency.version),
str(required_tool_dependency.status),
)
)
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
error_message=error_message,
)
else:
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.INSTALLED,
)
tool_dependencies.append(tool_dependency)
return tool_dependencies
def get_required_repository_package_env_sh_path(self, package_name, package_version, required_repository):
"""Return path to env.sh file in required repository if the required repository has been installed."""
env_sh_file_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=required_repository.name,
repository_owner=required_repository.owner,
repository_changeset_revision=required_repository.installed_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
env_sh_file_path = os.path.join(env_sh_file_dir, "env.sh")
return env_sh_file_path
def handle_complex_repository_dependency_for_package(
self, elem, package_name, package_version, tool_shed_repository
):
"""
Inspect the repository defined by a complex repository dependency definition and take certain steps to
enable installation of the received package name and version to proceed. The received elem is the
<repository> tag set which defines the complex repository dependency. The received tool_shed_repository
is the installed tool shed repository for which the tool dependency defined by the received package_name
and package_version is being installed.
"""
handled_tool_dependencies = []
tool_shed_url = elem.attrib["toolshed"]
required_repository_name = elem.attrib["name"]
required_repository_owner = elem.attrib["owner"]
default_required_repository_changeset_revision = elem.attrib["changeset_revision"]
required_repository = get_repository_for_dependency_relationship(
self.app,
tool_shed_url,
required_repository_name,
required_repository_owner,
default_required_repository_changeset_revision,
)
tool_shed = remove_protocol_from_tool_shed_url(tool_shed_url)
tmp_filename = None
if required_repository:
required_repository_changeset_revision = required_repository.installed_changeset_revision
# Define the installation directory for the required tool dependency package in the required repository.
required_repository_package_install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=required_repository_name,
repository_owner=required_repository_owner,
repository_changeset_revision=required_repository_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
# Define this dependent repository's tool dependency installation directory that will contain
# the env.sh file with a path to the required repository's installed tool dependency package.
dependent_install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=tool_shed_repository.name,
repository_owner=tool_shed_repository.owner,
repository_changeset_revision=tool_shed_repository.installed_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
if os.path.exists(dependent_install_dir):
# Notice that we'll throw away the following tool_dependency if it can be installed.
tool_dependency, can_install_tool_dependency = self.sync_database_with_file_system(
self.app,
tool_shed_repository,
package_name,
package_version,
dependent_install_dir,
tool_dependency_type="package",
)
if not can_install_tool_dependency:
log.debug(
"Tool dependency %s version %s cannot be installed (it was probably previously installed), "
"so appending it to the list of handled tool dependencies.",
str(tool_dependency.name),
str(tool_dependency.version),
)
handled_tool_dependencies.append(tool_dependency)
else:
can_install_tool_dependency = True
if can_install_tool_dependency:
# Set this dependent repository's tool dependency env.sh file with a path to the required repository's
# installed tool dependency package. We can get everything we need from the discovered installed
# required_repository.
if required_repository.is_deactivated_or_installed:
if not os.path.exists(required_repository_package_install_dir):
log.error(
f"Missing required tool dependency directory {str(required_repository_package_install_dir)}"
)
repo_files_dir = required_repository.repo_files_directory(self.app)
if not repo_files_dir:
message = (
"Unable to locate the repository directory for revision %s of installed repository %s owned by %s."
% (
str(required_repository.changeset_revision),
str(required_repository.name),
str(required_repository.owner),
)
)
raise Exception(message)
tool_dependencies_config = get_absolute_path_to_file_in_repository(
repo_files_dir, "tool_dependencies.xml"
)
if tool_dependencies_config:
config_to_use = tool_dependencies_config
else:
message = (
"Unable to locate required tool_dependencies.xml file for revision %s of installed repository %s owned by %s."
% (
str(required_repository.changeset_revision),
str(required_repository.name),
str(required_repository.owner),
)
)
raise Exception(message)
else:
# Make a call to the tool shed to get the changeset revision to which the current value of required_repository_changeset_revision
# should be updated if it's not current.
text = get_updated_changeset_revisions_from_tool_shed(
app=self.app,
tool_shed_url=tool_shed,
name=required_repository_name,
owner=required_repository_owner,
changeset_revision=required_repository_changeset_revision,
)
if text:
updated_changeset_revisions = listify(text)
# The list of changeset revisions is in reverse order, so the newest will be first.
required_repository_changeset_revision = updated_changeset_revisions[0]
# Make a call to the tool shed to get the required repository's tool_dependencies.xml file.
tmp_filename = self.create_temporary_tool_dependencies_config(
tool_shed,
required_repository_name,
required_repository_owner,
required_repository_changeset_revision,
)
config_to_use = tmp_filename
handled_tool_dependencies = self.create_tool_dependency_with_initialized_env_sh_file(
dependent_install_dir=dependent_install_dir,
tool_shed_repository=tool_shed_repository,
required_repository=required_repository,
package_name=package_name,
package_version=package_version,
tool_dependencies_config=config_to_use,
)
self.remove_file(tmp_filename)
else:
message = "Unable to locate required tool shed repository named %s owned by %s with revision %s." % (
str(required_repository_name),
str(required_repository_owner),
str(default_required_repository_changeset_revision),
)
raise Exception(message)
return handled_tool_dependencies
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# We have a complex repository dependency definition.
action_elem_tuples = []
proceed_with_install = False
rd_tool_dependencies = self.handle_complex_repository_dependency_for_package(
package_elem, package_name, package_version, tool_shed_repository
)
for rd_tool_dependency in rd_tool_dependencies:
if rd_tool_dependency.status == self.app.install_model.ToolDependency.installation_status.ERROR:
# We'll log the error here, but continue installing packages since some may not require this dependency.
log.error(
f"Error installing tool dependency for required repository: {str(rd_tool_dependency.error_message)}"
)
return tool_dependency, proceed_with_install, action_elem_tuples
def remove_file(self, file_name):
"""Attempt to remove a file from disk."""
if file_name:
if os.path.exists(file_name):
try:
os.remove(file_name)
except Exception:
pass
class SetEnvironment(RecipeTag):
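    """RecipeTag handler for the <set_environment> tag set defining environment variables."""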
def __init__(self, app):
self.app = app
self.tag = "set_environment"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# We need to handle two tag sets for package_elem here, this:
# <set_environment version="1.0">
# <environment_variable name="R_SCRIPT_PATH"action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
# </set_environment>
# or this:
# <environment_variable name="R_SCRIPT_PATH"action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
action_elem_tuples = []
proceed_with_install = False
if tool_dependency_db_records is None:
attr_tups_of_dependencies_for_install = []
else:
attr_tups_of_dependencies_for_install = [
(td.name, td.version, td.type) for td in tool_dependency_db_records
]
try:
self.set_environment(package_elem, tool_shed_repository, attr_tups_of_dependencies_for_install)
except Exception as e:
error_message = f"Error setting environment for tool dependency: {str(e)}"
log.debug(error_message)
return tool_dependency, proceed_with_install, action_elem_tuples
def set_environment(self, elem, tool_shed_repository, attr_tups_of_dependencies_for_install):
"""
Create a ToolDependency to set an environment variable. This is different from the process used to
set an environment variable that is associated with a package. An example entry in a tool_dependencies.xml
file is::
<set_environment version="1.0">
<environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
</set_environment>
This method must also handle the sub-element tag::
<environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
"""
# TODO: Add support for a repository dependency definition within this tool dependency type's tag set. This should look something like
# the following. See the implementation of support for this in the tool dependency package type's method above.
# This function is only called for set environment actions as defined below, not within an <install version="1.0"> tool
# dependency type. Here is an example of the tag set this function does handle:
# <action type="set_environment">
# <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR</environment_variable>
# </action>
# Here is an example of the tag set this function does not handle:
# <set_environment version="1.0">
# <repository toolshed="<tool shed>" name="<repository name>" owner="<repository owner>" changeset_revision="<changeset revision>" />
# </set_environment>
env_manager = EnvManager(self.app)
tool_dependencies = []
env_var_version = elem.get("version", "1.0")
tool_shed_repository_install_dir = os.path.abspath(tool_shed_repository.repo_files_directory(self.app))
if elem.tag == "environment_variable":
# <environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
elems = [elem]
else:
# <set_environment version="1.0">
# <environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
# </set_environment>
elems = [env_var_elem for env_var_elem in elem]
for env_var_elem in elems:
env_var_name = env_var_elem.get("name")
if not env_var_name:
raise Exception("The <environment_variable> tag must have a name attribute")
# The value of env_var_name must match the text value of at least 1 <requirement> tag in the
# tool config's <requirements> tag set whose "type" attribute is "set_environment" (e.g.,
# <requirement type="set_environment">R_SCRIPT_PATH</requirement>).
env_var_action = env_var_elem.get("action")
if not env_var_action:
raise Exception("The <environment_variable> tag must have an action attribute")
# Tool dependencies of type "set_environment" always have the version attribute set to None.
attr_tup = (env_var_name, None, "set_environment")
if attr_tup in attr_tups_of_dependencies_for_install:
install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=tool_shed_repository.name,
repository_owner=tool_shed_repository.owner,
repository_changeset_revision=tool_shed_repository.installed_changeset_revision,
tool_dependency_type="set_environment",
tool_dependency_name=env_var_name,
tool_dependency_version=None,
)
install_environment = InstallEnvironment(
app=self.app,
tool_shed_repository_install_dir=tool_shed_repository_install_dir,
install_dir=install_dir,
)
env_var_dict = env_manager.create_env_var_dict(
elem=env_var_elem, install_environment=install_environment
)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
status = self.app.install_model.ToolDependency.installation_status.INSTALLING
tool_dependency = tool_dependency_util.create_or_update_tool_dependency(
app=self.app,
tool_shed_repository=tool_shed_repository,
name=env_var_name,
version=None,
type="set_environment",
status=status,
set_status=True,
)
if env_var_version == "1.0":
# Create this tool dependency's env.sh file.
env_file_builder = EnvFileBuilder(install_dir)
return_code = env_file_builder.append_line(make_executable=True, **env_var_dict)
if return_code:
error_message = "Error creating env.sh file for tool dependency %s, return_code: %s" % (
str(tool_dependency.name),
str(return_code),
)
log.debug(error_message)
status = self.app.install_model.ToolDependency.installation_status.ERROR
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app, tool_dependency=tool_dependency, status=status, error_message=error_message
)
else:
if tool_dependency.status not in [
self.app.install_model.ToolDependency.installation_status.ERROR,
self.app.install_model.ToolDependency.installation_status.INSTALLED,
]:
status = self.app.install_model.ToolDependency.installation_status.INSTALLED
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app, tool_dependency=tool_dependency, status=status
)
log.debug(
"Environment variable %s set in %s for tool dependency %s."
% (str(env_var_name), str(install_dir), str(tool_dependency.name))
)
else:
error_message = 'Only set_environment version 1.0 is currently supported (i.e., change your tag to be <set_environment version="1.0">).'
status = self.app.install_model.ToolDependency.installation_status.ERROR
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app, tool_dependency=tool_dependency, status=status, error_message=error_message
)
tool_dependencies.append(tool_dependency)
return tool_dependencies
|
py | 1a4be1390ef45adec9ccae6d74a62d2577776047 | from torchio import RandomNoise
from ...utils import TorchioTestCase
class TestRandomNoise(TorchioTestCase):
"""Tests for `RandomNoise`."""
def test_no_noise(self):
transform = RandomNoise(mean=0., std=0.)
transformed = transform(self.sample_subject)
self.assertTensorAlmostEqual(
self.sample_subject.t1.data,
transformed.t1.data,
)
def test_with_noise(self):
transform = RandomNoise()
transformed = transform(self.sample_subject)
self.assertTensorNotEqual(
self.sample_subject.t1.data,
transformed.t1.data,
)
def test_constant_noise(self):
transform = RandomNoise(mean=(5., 5.), std=0.)
transformed = transform(self.sample_subject)
self.assertTensorAlmostEqual(
self.sample_subject.t1.data + 5,
transformed.t1.data,
)
def test_negative_std(self):
with self.assertRaises(ValueError):
RandomNoise(std=-2)
def test_std_range_with_negative_min(self):
with self.assertRaises(ValueError):
RandomNoise(std=(-0.5, 4))
def test_wrong_std_type(self):
with self.assertRaises(ValueError):
RandomNoise(std='wrong')
def test_wrong_mean_type(self):
with self.assertRaises(ValueError):
RandomNoise(mean='wrong')
|
py | 1a4be141336224a2695adfa0f24b5a3f332b6559 | import logging, math
from gi.repository import Gst, Gtk
class AudioLevelDisplay(object):
""" Displays a Level-Meter of another VideoDisplay into a GtkWidget """
def __init__(self, drawing_area):
self.log = logging.getLogger('AudioLevelDisplay[%s]' % drawing_area.get_name())
self.drawing_area = drawing_area
self.levelrms = []
self.levelpeak = []
self.leveldecay = []
# register on_draw handler
self.drawing_area.connect('draw', self.on_draw)
def on_draw(self, widget, cr):
# number of audio-channels
channels = len(self.levelrms)
if channels == 0:
return
width = self.drawing_area.get_allocated_width()
height = self.drawing_area.get_allocated_height()
# space between the channels in px
margin = 2
# 1 channel -> 0 margins, 2 channels -> 1 margin, 3 channels…
channel_width = int( (width - (margin * (channels - 1))) / channels )
# self.log.debug(
# 'width: %upx filled with %u channels of each %upx '
# 'and %ux margin of %upx',
# width, channels, channel_width, channels-1, margin)
# normalize db-value to 0…1 and multiply with the height
rms_px = [ self.normalize_db(db) * height for db in self.levelrms ]
peak_px = [ self.normalize_db(db) * height for db in self.levelpeak ]
decay_px = [ self.normalize_db(db) * height for db in self.leveldecay ]
# set the line-width >1, to get a nice overlap
cr.set_line_width(2)
# iterate over all pixels
for y in range(0, height):
# calculate our place in the color-gradient, clamp to 0…1
# 0 -> green, 0.5 -> yellow, 1 -> red
color = self.clamp(((y / height) - 0.6) / 0.42)
for channel in range(0, channels):
# start-coordinate for this channel
x = (channel * channel_width) + (channel * margin)
# calculate the brightness based on whether this line is in the
# active region
# default to 0.25, dark
bright = 0.25
if int(y - decay_px[channel]) in range(0, 2):
# decay marker, 2px wide, extra bright
bright = 1.5
elif y < rms_px[channel]:
# rms bar, full bright
bright = 1
elif y < peak_px[channel]:
# peak bar, a little darker
bright = 0.75
# set the color with a little reduced green
cr.set_source_rgb(
color * bright,
(1-color) * bright * 0.75,
0
)
# draw the marker
cr.move_to(x, height-y)
cr.line_to(x + channel_width, height-y)
cr.stroke()
# draw a black line for the margin
cr.set_source_rgb(0,0,0)
cr.move_to(x + channel_width, height-y)
cr.line_to(x + channel_width + margin, height-y)
cr.stroke()
# draw db text-markers
cr.set_source_rgb(1, 1, 1)
for db in [-40, -20, -10, -5, -4, -3, -2, -1]:
text = str(db)
xbearing, ybearing, textwidth, textheight, xadvance, yadvance = (
cr.text_extents(text))
y = self.normalize_db(db) * height
cr.move_to((width-textwidth) / 2, height - y - textheight)
cr.show_text(text)
return True
def normalize_db(self, db):
        # Maps a dB value to a 0…1 fraction used as the bar height:
        # -60db -> 0.00 (very quiet)
        # -30db -> 0.26
        # -15db -> 0.49
        #  -5db -> 0.76
        #  -0db -> 1.00 (very loud)
logscale = 1 - math.log10(-0.15 * db + 1)
return self.clamp(logscale)
def clamp(self, value, min_value=0, max_value=1):
return max(min(value, max_value), min_value)
def level_callback(self, rms, peak, decay):
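        # rms/peak/decay are lists of per-channel level values in dB, typically
        # taken from a GStreamer 'level' element's bus messages.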
self.levelrms = rms
self.levelpeak = peak
self.leveldecay = decay
self.drawing_area.queue_draw()
|
py | 1a4be1c87bc1032ce79018f546d43c3572cceca2 | from pynput import keyboard
import time
import BarcodeScanner as BB
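# Collects 4 key presses per scan into the global string s and prints the result;
# the barcode scanner is assumed to emit key events (keyboard-wedge style).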
def on_press(a):
#try:
global count
global s
if a!=keyboard.Key.shift and a!=keyboard.Key.enter :
#print('{0}'.format(a))
count = count+1
s = s+str(a.char)
if count==4:
return False
#except AttributeError:
#print('{0}'.format(key))
def on_release(key):
#print('{0}'.format(key))
time.sleep(0.1)
while True:
count = 0
s = ""
with keyboard.Listener(on_press=on_press,on_release=on_release) as listener:
#while True:
#print('test')
BB.Scan()
time.sleep(1)
print(s)
listener.join()
|
py | 1a4be22543af73441ad4c9ced85adf6351a9df98 | import pytest
import numpy as np
@pytest.fixture
def n():
"""Number of arrays"""
return 3
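# Example of a test that consumes this fixture (hypothetical):
#
# def test_number_of_arrays(n):
#     assert n == 3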
|
py | 1a4be25b09a988e598514145d00292458bb13ad0 | sent="hello how are you Wish you a happy birthday"
list=sent.split()
a=0
for a in list:
print(list.count(a))
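# Note: collections.Counter(sent.split()) would give each distinct word's count once.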
|
py | 1a4be2a6a9af2bd965c8f7ffee2aedda2e286588 | import pandas as pd
from sklearn.metrics import mean_squared_error
import matplotlib
matplotlib.use('Agg') # for saving figures
import matplotlib.pyplot as plt
series = pd.read_csv('daily-users.csv', header=0, parse_dates=[0], index_col=0, squeeze=True)
from statsmodels.tsa.arima_model import ARIMA
f, axarr = plt.subplots(2, 2)
col = 0
row = 0
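# Each tuple below is an ARIMA order (p, d, q): autoregressive terms, degree of
# differencing, and moving-average terms.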
for pdq in [(3,0,0), (0,0,3), (3,1,0), (3,1,1)]:
print(pdq)
axarr[row, col].set_title('p = %d, d = %d, q = %d' % pdq)
model = ARIMA(series.ix[:'2017-12-31'], pdq, freq='D').fit()
predictions, _, _ = model.forecast(len(series.ix['2018-01-01':]))
print(predictions)
print(series.ix['2018-01-01':][:len(predictions)])
print("Mean squared error: %.2f"
% mean_squared_error(series.ix['2018-01-01':][:len(predictions)], predictions))
series.ix['2017-01-01':].plot(color='gray', linewidth=1, ax=axarr[row, col])
pred_series = pd.Series(predictions, index=series.ix['2018-01-01':][:len(predictions)].index)
pred_series.plot(color='blue', linewidth=3, ax=axarr[row, col])
axarr[row, col].axes.get_xaxis().set_visible(False)
axarr[row, col].axes.get_yaxis().set_visible(False)
col += 1
if col == 2:
col = 0
row += 1
plt.savefig('arima-daily-grid.png', dpi=300, bbox_inches='tight', pad_inches=0)
|
py | 1a4be2e871aa96e26635154ddde30a87e3214c63 | import gym
import numpy as np
import torch
from torch import nn as nn
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.policies.argmax import ArgmaxDiscretePolicy
from rlkit.torch.vpg.ppo import PPOTrainer
from rlkit.torch.networks import Mlp
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.torch_rl_algorithm import TorchOnlineRLAlgorithm
def experiment(variant):
from simple_sup_lstm import SimpleSupLSTMEnv
expl_env = SimpleSupLSTMEnv(**variant['env_kwargs'])
eval_env = SimpleSupLSTMEnv(**variant['env_kwargs'])
obs_dim = eval_env.observation_space.low.size
action_dim = eval_env.action_space.n
label_num = expl_env.label_num
label_dim = expl_env.label_dim
if variant['load_kwargs']['load']:
load_dir = variant['load_kwargs']['load_dir']
load_data = torch.load(load_dir+'/params.pkl',map_location='cpu')
policy = load_data['trainer/policy']
vf = load_data['trainer/value_function']
else:
hidden_dim = variant['lstm_kwargs']['hidden_dim']
num_lstm_layers = variant['lstm_kwargs']['num_layers']
node_dim = variant['gnn_kwargs']['node_dim']
node_num = expl_env.node_num
input_node_dim = expl_env.node_dim
a_0 = np.zeros(action_dim)
o_0 = np.zeros((node_num, hidden_dim*num_lstm_layers))
h_0 = np.zeros((node_num, hidden_dim*num_lstm_layers))
c_0 = np.zeros((node_num, hidden_dim*num_lstm_layers))
latent_0 = (o_0, h_0, c_0)
from lstm_net import LSTMNet
lstm_ego = LSTMNet(node_dim, action_dim, hidden_dim, num_lstm_layers)
lstm_other = LSTMNet(node_dim, 0, hidden_dim, num_lstm_layers)
from graph_builder import TrafficGraphBuilder
gb = TrafficGraphBuilder(input_dim=input_node_dim+hidden_dim, node_num=node_num,
ego_init=torch.tensor([0.,1.]),
other_init=torch.tensor([1.,0.]),
)
from gnn_net import GNNNet
gnn = GNNNet(
pre_graph_builder=gb,
node_dim=variant['gnn_kwargs']['node_dim'],
conv_type=variant['gnn_kwargs']['conv_type'],
num_conv_layers=variant['gnn_kwargs']['num_layers'],
hidden_activation=variant['gnn_kwargs']['activation'],
)
from gnn_lstm_net import GNNLSTMNet
policy_net = GNNLSTMNet(node_num,gnn,lstm_ego,lstm_other)
from layers import FlattenLayer, SelectLayer
post_net = nn.Sequential(
SelectLayer(-2,0),
FlattenLayer(2),
nn.ReLU(),
nn.Linear(hidden_dim,action_dim)
)
from softmax_lstm_policy import SoftmaxLSTMPolicy
policy = SoftmaxLSTMPolicy(
a_0=a_0,
latent_0=latent_0,
obs_dim=obs_dim,
action_dim=action_dim,
lstm_net=policy_net,
post_net=post_net,
)
print('parameters: ',np.sum([p.view(-1).shape[0] for p in policy.parameters()]))
vf = Mlp(
hidden_sizes=[32, 32],
input_size=obs_dim,
output_size=1,
) # TODO: id is also an input
vf_criterion = nn.MSELoss()
from rlkit.torch.policies.make_deterministic import MakeDeterministic
eval_policy = MakeDeterministic(policy)
expl_policy = policy
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollector(
expl_env,
expl_policy,
)
trainer = PPOTrainer(
policy=policy,
value_function=vf,
vf_criterion=vf_criterion,
recurrent=True,
**variant['trainer_kwargs']
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='SimpleSupLSTM')
parser.add_argument('--node_num', type=int, default=5)
parser.add_argument('--node_dim', type=int, default=2)
parser.add_argument('--log_dir', type=str, default='PPOGNN')
parser.add_argument('--llayer', type=int, default=1)
parser.add_argument('--hidden', type=int, default=32)
parser.add_argument('--gnn', type=str, default='GSage')
parser.add_argument('--node', type=int, default=16)
parser.add_argument('--glayer', type=int, default=3)
parser.add_argument('--act', type=str, default='relu')
parser.add_argument('--lr', type=float, default=None)
parser.add_argument('--bs', type=int, default=None)
parser.add_argument('--epoch', type=int, default=None)
parser.add_argument('--load', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--snapshot_mode', type=str, default="gap_and_last")
parser.add_argument('--snapshot_gap', type=int, default=500)
args = parser.parse_args()
import os.path as osp
pre_dir = './Data/'+args.exp_name+'node'+str(args.node_num)+'dim'+str(args.node_dim)
main_dir = args.log_dir\
+('llayer'+str(args.llayer))\
+('hidden'+str(args.hidden))\
+args.gnn\
+('node'+str(args.node))\
+('glayer'+str(args.glayer))\
+('act'+args.act)\
+(('ep'+str(args.epoch)) if args.epoch else '')\
+(('lr'+str(args.lr)) if args.lr else '')\
+(('bs'+str(args.bs)) if args.bs else '')
log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))
max_path_length = 10
# noinspection PyTypeChecker
variant = dict(
lstm_kwargs=dict(
hidden_dim=args.hidden,
num_layers=args.llayer,
),
gnn_kwargs=dict(
conv_type=args.gnn,
node_dim=args.node,
num_layers=args.glayer,
activation=args.act,
),
env_kwargs=dict(
node_num=args.node_num,
node_dim=args.node_dim
),
algorithm_kwargs=dict(
num_epochs=(args.epoch if args.epoch else 1000),
num_eval_steps_per_epoch=1000,
num_train_loops_per_epoch=1,
num_trains_per_train_loop=1,
num_expl_steps_per_train_loop=(args.bs if args.bs else 1000),
max_path_length=max_path_length,
save_best=True,
),
trainer_kwargs=dict(
discount=0.99,
max_path_length=max_path_length,
policy_lr=(args.lr if args.lr else 1e-4),
vf_lr=(args.lr if args.lr else 1e-3),
),
load_kwargs=dict(
load=args.load,
load_dir=log_dir,
),
)
if args.load:
log_dir = log_dir + '_load'
import os
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
with open(osp.join(log_dir,'variant.json'),'w') as out_json:
import json
json.dump(variant,out_json,indent=2)
import sys
cmd_input = 'python ' + ' '.join(sys.argv) + '\n'
with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:
f.write(cmd_input)
setup_logger(args.exp_name+'/'+main_dir, variant=variant,
snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,
log_dir=log_dir)
import numpy as np
import torch
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
experiment(variant)
|
py | 1a4be4ff749e65e01e5a2430065d26fd40df37e9 | """Command to set a metadata attribute."""
import asyncio
from typing import Optional
import click
from astoria.astctl.command import Command
from astoria.common.ipc import MetadataSetManagerRequest
loop = asyncio.get_event_loop()
@click.command("set")
@click.argument("attribute")
@click.argument("value")
@click.option("-v", "--verbose", is_flag=True)
@click.option("-c", "--config-file", type=click.Path(exists=True))
def set(attribute: str, value: str, *, verbose: bool, config_file: Optional[str]) -> None:
"""Set a metadata attribute."""
command = SetMetadataCommand(attribute, value, verbose, config_file)
loop.run_until_complete(command.run())
class SetMetadataCommand(Command):
"""Set a metadata attribute."""
dependencies = ["astmetad"]
def __init__(
self,
attribute: str,
value: str,
verbose: bool,
config_file: Optional[str],
) -> None:
super().__init__(verbose, config_file)
self._attr = attribute
self._value = value
async def main(self) -> None:
"""Main method of the command."""
res = await self._mqtt.manager_request(
"astmetad",
"mutate",
MetadataSetManagerRequest(
sender_name=self.name,
attr=self._attr,
value=self._value,
),
)
if res.success:
print(f"Successfully set {self._attr} to {self._value}.")
if len(res.reason) > 0:
print(res.reason)
else:
print(f"Unable to set {self._attr} to {self._value}.")
if len(res.reason) > 0:
print(res.reason)
# Add timeout
self.halt(silent=True)
|
py | 1a4be5ee7393b605d79854ec27eee9fe04a81c28 | from __future__ import print_function
from __future__ import division
from six.moves import xrange
import os
import time
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import StandardScaler
from lib.datasets import MNIST as Data
from lib.model import Model as BaseModel
from lib.segmentation import segmentation_adjacency, extract_features_fixed
# from lib.segmentation import slic_fixed
from lib.segmentation import quickshift_fixed
from lib.layer import SpatialCNN as Conv, FC
from lib.graph import receptive_fields, fill_features
from lib.pipeline import PreprocessedDataset, FileQueue
# SLIC_FEATURES = [4, 5, 6, 7, 8, 18, 20, 21, 22]
QUICKSHIFT_FEATURES = [4, 6, 7, 8, 24, 28, 29, 31, 37]
DATA_DIR = 'data/mnist'
# PREPROCESS_FIRST = 'data/mnist/slic_spatial'
PREPROCESS_FIRST = 'data/mnist/quickshift_spatial'
NODE_SIZE = 25
NODE_STRIDE = 4
DELTA = 3
NEIGHBORHOOD_SIZE = 25
CONNECTIVITY = 8
LEARNING_RATE = 0.001
TRAIN_DIR = None
# LOG_DIR = 'data/summaries/mnist_slic_spatial'
LOG_DIR = 'data/summaries/mnist_quickshift_spatial'
SAVE_STEP = 250
AUGMENT_TRAIN_EXAMPLES = False
DROPOUT = 0.5
BATCH_SIZE = 64
MAX_STEPS = 15000
DISPLAY_STEP = 10
# FORM_FEATURES = SLIC_FEATURES
FORM_FEATURES = QUICKSHIFT_FEATURES
NUM_FEATURES = len(FORM_FEATURES) + 1
data = Data(DATA_DIR)
# segmentation_algorithm = slic_fixed(
# num_segments=100, compactness=5, max_iterations=10, sigma=0)
segmentation_algorithm = quickshift_fixed(
ratio=1, kernel_size=2, max_dist=2, sigma=0)
feature_extraction_algorithm = extract_features_fixed(FORM_FEATURES)
def preprocess_spatial_fixed(
segmentation_algorithm, feature_extraction_algorithm, node_size,
node_stride, delta, neighborhood_size, connectivity):
def _preprocess(image):
segmentation = segmentation_algorithm(image)
adj, points, _ = segmentation_adjacency(segmentation, connectivity)
features = feature_extraction_algorithm(segmentation, image)
StandardScaler(copy=False).fit_transform(features)
fields = receptive_fields(points, adj, node_size, node_stride,
neighborhood_size, delta)
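        # fill_features is expected to produce an array of shape
        # (node_size, neighborhood_size, num_features), matching the 'features'
        # placeholder defined further below.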
return fill_features(fields, features)
return _preprocess
preprocess_algorithm = preprocess_spatial_fixed(
segmentation_algorithm, feature_extraction_algorithm, NODE_SIZE,
NODE_STRIDE, DELTA, NEIGHBORHOOD_SIZE, CONNECTIVITY)
# Generate preprocessed dataset.
data.train = PreprocessedDataset(
os.path.join(PREPROCESS_FIRST, 'train'), data.train, preprocess_algorithm)
data.val = PreprocessedDataset(
os.path.join(PREPROCESS_FIRST, 'val'), data.val, preprocess_algorithm)
data.test = PreprocessedDataset(
os.path.join(PREPROCESS_FIRST, 'test'), data.test, preprocess_algorithm)
capacity = 10 * BATCH_SIZE
train_queue = FileQueue(data.train, BATCH_SIZE, capacity, shuffle=True)
val_queue = FileQueue(data.val, BATCH_SIZE, capacity, shuffle=True)
test_queue = FileQueue(data.test, BATCH_SIZE, capacity, shuffle=False)
placeholders = {
'features':
tf.placeholder(tf.float32,
[None, NODE_SIZE, NEIGHBORHOOD_SIZE,
NUM_FEATURES], 'features'),
'labels':
tf.placeholder(tf.uint8, [None, data.num_classes], 'labels'),
'dropout':
tf.placeholder(tf.float32, [], 'dropout'),
}
class Model(BaseModel):
def _build(self):
conv_1 = Conv(
NUM_FEATURES, 64, NEIGHBORHOOD_SIZE, logging=self.logging)
fc_1 = FC(NODE_SIZE * 64, 1024, logging=self.logging)
fc_2 = FC(
1024,
data.num_classes,
act=lambda x: x,
bias=False,
dropout=self.placeholders['dropout'],
logging=self.logging)
self.layers = [conv_1, fc_1, fc_2]
model = Model(
placeholders=placeholders,
learning_rate=LEARNING_RATE,
train_dir=TRAIN_DIR,
log_dir=LOG_DIR)
model.build()
global_step = model.initialize()
def feed_dict_with_batch(batch, dropout=0):
    features = np.array([data[0] for data in batch], np.float32)
    labels = np.array([data[1] for data in batch], np.uint8)
    return {
        placeholders['features']: features,
        placeholders['labels']: labels,
        # Use the dropout argument rather than the global DROPOUT constant so
        # that evaluation can run with zero dropout.
        placeholders['dropout']: dropout,
    }
try:
for step in xrange(global_step, MAX_STEPS):
t_pre = time.process_time()
batch = train_queue.dequeue()
feed_dict = feed_dict_with_batch(batch, DROPOUT)
t_pre = time.process_time() - t_pre
t_train = model.train(feed_dict, step)
if step % DISPLAY_STEP == 0:
# Evaluate on training and validation set with zero dropout.
feed_dict.update({model.placeholders['dropout']: 0})
train_info = model.evaluate(feed_dict, step, 'train')
batch = val_queue.dequeue()
            feed_dict = feed_dict_with_batch(batch, 0)  # evaluate the validation batch with zero dropout
val_info = model.evaluate(feed_dict, step, 'val')
log = 'step={}, '.format(step)
log += 'time={:.2f}s + {:.2f}s, '.format(t_pre, t_train)
log += 'train_loss={:.5f}, '.format(train_info[0])
log += 'train_acc={:.5f}, '.format(train_info[1])
log += 'val_loss={:.5f}, '.format(val_info[0])
log += 'val_acc={:.5f}'.format(val_info[1])
print(log)
if step % SAVE_STEP == 0:
model.save()
except KeyboardInterrupt:
print()
train_queue.close()
val_queue.close()
print('Optimization finished!')
print('Evaluate on test set. This can take a few minutes.')
try:
num_steps = data.test.num_examples // BATCH_SIZE
test_info = [0, 0]
for i in xrange(num_steps):
batch = test_queue.dequeue()
        feed_dict = feed_dict_with_batch(batch, 0)  # no dropout at test time
batch_info = model.evaluate(feed_dict)
test_info = [a + b for a, b in zip(test_info, batch_info)]
log = 'Test results: '
log += 'loss={:.5f}, '.format(test_info[0] / num_steps)
log += 'acc={:.5f}, '.format(test_info[1] / num_steps)
print(log)
except KeyboardInterrupt:
print()
print('Test evaluation aborted.')
test_queue.close()
|
py | 1a4be5f138c5f671b1280657629e4c4bbc642eea | from __future__ import division
import torch
from ignite.metrics.metric import Metric
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced
class TopKCategoricalAccuracy(Metric):
"""
Calculates the top-k categorical accuracy.
- `update` must receive output of the form `(y_pred, y)`.
"""
def __init__(self, k=5, output_transform=lambda x: x, device=None):
super(TopKCategoricalAccuracy, self).__init__(output_transform, device=device)
self._k = k
@reinit__is_reduced
def reset(self):
self._num_correct = 0
self._num_examples = 0
@reinit__is_reduced
def update(self, output):
y_pred, y = output
sorted_indices = torch.topk(y_pred, self._k, dim=1)[1]
expanded_y = y.view(-1, 1).expand(-1, self._k)
correct = torch.sum(torch.eq(sorted_indices, expanded_y), dim=1)
self._num_correct += torch.sum(correct).item()
self._num_examples += correct.shape[0]
@sync_all_reduce("_num_correct", "_num_examples")
def compute(self):
if self._num_examples == 0:
raise NotComputableError("TopKCategoricalAccuracy must have at"
"least one example before it can be computed.")
return self._num_correct / self._num_examples
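# Minimal standalone usage sketch (outside of an ignite Engine); y_pred is an
# (N, C) score tensor and y an (N,) tensor of class indices:
#
#   metric = TopKCategoricalAccuracy(k=5)
#   metric.reset()
#   metric.update((y_pred, y))   # call once per batch
#   accuracy = metric.compute()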
|
py | 1a4be619f40f9860adda0cd5d819ec12d41dd1cf | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import logging
import mock
import pytest
import uuid
from collections import namedtuple
from datetime import datetime, timedelta
from django.utils import timezone
from time import time
from sentry.app import tsdb
from sentry.constants import VERSION_LENGTH
from sentry.event_manager import (
HashDiscarded, EventManager, EventUser,
md5_from_hash
)
from sentry.models import (
Activity, Environment, Event, ExternalIssue, Group, GroupEnvironment,
GroupHash, GroupLink, GroupRelease, GroupResolution, GroupStatus,
GroupTombstone, EventMapping, Integration, Release,
ReleaseProjectEnvironment, OrganizationIntegration, UserReport
)
from sentry.signals import event_discarded, event_saved
from sentry.testutils import assert_mock_called_once_with_partial, TransactionTestCase
from sentry.utils.data_filters import FilterStatKeys
def make_event(**kwargs):
result = {
'event_id': 'a' * 32,
'message': 'foo',
'timestamp': 1403007314.570599,
'level': logging.ERROR,
'logger': 'default',
'tags': [],
}
result.update(kwargs)
return result
class EventManagerTest(TransactionTestCase):
def make_release_event(self, release_name, project_id):
manager = EventManager(make_event(release=release_name))
manager.normalize()
event = manager.save(project_id)
return event
def test_key_id_remains_in_data(self):
manager = EventManager(make_event(key_id=12345))
manager.normalize()
assert manager.get_data()['key_id'] == 12345
event = manager.save(1)
assert event.data['key_id'] == 12345
def test_similar_message_prefix_doesnt_group(self):
# we had a regression which caused the default hash to just be
# 'event.message' instead of '[event.message]' which caused it to
# generate a hash per letter
manager = EventManager(make_event(event_id='a', message='foo bar'))
manager.normalize()
event1 = manager.save(1)
manager = EventManager(make_event(event_id='b', message='foo baz'))
manager.normalize()
event2 = manager.save(1)
assert event1.group_id != event2.group_id
@mock.patch('sentry.event_manager.should_sample')
def test_saves_event_mapping_when_sampled(self, should_sample):
should_sample.return_value = True
event_id = 'a' * 32
manager = EventManager(make_event(event_id=event_id))
event = manager.save(1)
# This is a brand new event, so it is actually saved.
# In this case, we don't need an EventMapping, but we
# do need the Event.
assert not EventMapping.objects.filter(
group_id=event.group_id,
event_id=event_id,
).exists()
assert Event.objects.filter(
event_id=event_id,
).exists()
event_id = 'b' * 32
manager = EventManager(make_event(event_id=event_id))
event = manager.save(1)
# This second is a dupe, so should be sampled
# For a sample, we want to store the EventMapping,
# but don't need to store the Event
assert EventMapping.objects.filter(
group_id=event.group_id,
event_id=event_id,
).exists()
assert not Event.objects.filter(
event_id=event_id,
).exists()
def test_platform_is_saved(self):
manager = EventManager(
make_event(
**{'sentry.interfaces.AppleCrashReport': {
'crash': {},
'binary_images': []
}}
)
)
manager.normalize()
event = manager.save(1)
        assert 'sentry.interfaces.AppleCrashReport' not in event.interfaces
    def test_ephemeral_interfaces_removed_on_save(self):
manager = EventManager(make_event(platform='python'))
event = manager.save(1)
group = event.group
assert group.platform == 'python'
assert event.platform == 'python'
def test_dupe_message_id(self):
event_id = 'a' * 32
manager = EventManager(make_event(event_id=event_id))
manager.save(1)
assert Event.objects.count() == 1
# ensure that calling it again doesn't raise a db error
manager = EventManager(make_event(event_id=event_id))
manager.save(1)
assert Event.objects.count() == 1
def test_updates_group(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
checksum='a' * 32,
)
)
event = manager.save(1)
manager = EventManager(
make_event(
message='foo bar',
event_id='b' * 32,
checksum='a' * 32,
)
)
with self.tasks():
event2 = manager.save(1)
group = Group.objects.get(id=event.group_id)
assert group.times_seen == 2
assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
assert group.message == event2.message
assert group.data.get('type') == 'default'
assert group.data.get('metadata') == {
'title': 'foo bar',
}
def test_updates_group_with_fingerprint(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
event = manager.save(1)
manager = EventManager(
make_event(
message='foo bar',
event_id='b' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
event2 = manager.save(1)
group = Group.objects.get(id=event.group_id)
assert group.times_seen == 2
assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
assert group.message == event2.message
def test_differentiates_with_fingerprint(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
fingerprint=['{{ default }}', 'a' * 32],
)
)
with self.tasks():
manager.normalize()
event = manager.save(1)
manager = EventManager(
make_event(
message='foo bar',
event_id='b' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
manager.normalize()
event2 = manager.save(1)
assert event.group_id != event2.group_id
def test_unresolves_group(self):
# N.B. EventManager won't unresolve the group unless the event2 has a
# later timestamp than event1. MySQL doesn't support microseconds.
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=1403007314,
)
)
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
group.status = GroupStatus.RESOLVED
group.save()
assert group.is_resolved()
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=1403007345,
)
)
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=group.id)
assert not group.is_resolved()
@mock.patch('sentry.event_manager.plugin_is_regression')
def test_does_not_unresolve_group(self, plugin_is_regression):
# N.B. EventManager won't unresolve the group unless the event2 has a
# later timestamp than event1. MySQL doesn't support microseconds.
plugin_is_regression.return_value = False
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=1403007314,
)
)
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
group.status = GroupStatus.RESOLVED
group.save()
assert group.is_resolved()
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=1403007315,
)
)
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=group.id)
assert group.is_resolved()
@mock.patch('sentry.tasks.activity.send_activity_notifications.delay')
@mock.patch('sentry.event_manager.plugin_is_regression')
def test_marks_as_unresolved_with_new_release(
self, plugin_is_regression, mock_send_activity_notifications_delay
):
plugin_is_regression.return_value = True
old_release = Release.objects.create(
version='a',
organization_id=self.project.organization_id,
date_added=timezone.now() - timedelta(minutes=30),
)
old_release.add_project(self.project)
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=time() - 50000, # need to work around active_at
release=old_release.version,
)
)
event = manager.save(1)
group = event.group
group.update(status=GroupStatus.RESOLVED)
resolution = GroupResolution.objects.create(
release=old_release,
group=group,
)
activity = Activity.objects.create(
group=group,
project=group.project,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
data={'version': ''},
)
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=time(),
release=old_release.version,
)
)
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == ''
assert GroupResolution.objects.filter(group=group).exists()
manager = EventManager(
make_event(
event_id='c' * 32,
checksum='a' * 32,
timestamp=time(),
release='b',
)
)
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == 'b'
assert not GroupResolution.objects.filter(group=group).exists()
activity = Activity.objects.get(
group=group,
type=Activity.SET_REGRESSION,
)
mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
@mock.patch('sentry.integrations.example.integration.ExampleIntegration.sync_status_outbound')
@mock.patch('sentry.tasks.activity.send_activity_notifications.delay')
@mock.patch('sentry.event_manager.plugin_is_regression')
def test_marks_as_unresolved_with_new_release_with_integration(
self, plugin_is_regression, mock_send_activity_notifications_delay, mock_sync_status_outbound
):
plugin_is_regression.return_value = True
old_release = Release.objects.create(
version='a',
organization_id=self.project.organization_id,
date_added=timezone.now() - timedelta(minutes=30),
)
old_release.add_project(self.project)
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=time() - 50000, # need to work around active_at
release=old_release.version,
)
)
event = manager.save(1)
group = event.group
org = group.organization
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org, self.user)
OrganizationIntegration.objects.filter(
integration_id=integration.id,
organization_id=group.organization.id,
).update(
config={
'sync_comments': True,
'sync_status_outbound': True,
'sync_status_inbound': True,
'sync_assignee_outbound': True,
'sync_assignee_inbound': True,
}
)
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id,
integration_id=integration.id,
key='APP-%s' % group.id,
)[0]
GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
group.update(status=GroupStatus.RESOLVED)
resolution = GroupResolution.objects.create(
release=old_release,
group=group,
)
activity = Activity.objects.create(
group=group,
project=group.project,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
data={'version': ''},
)
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=time(),
release=old_release.version,
)
)
with self.tasks():
with self.feature({
'organizations:integrations-issue-sync': True,
}):
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == ''
assert GroupResolution.objects.filter(group=group).exists()
manager = EventManager(
make_event(
event_id='c' * 32,
checksum='a' * 32,
timestamp=time(),
release='b',
)
)
event = manager.save(1)
mock_sync_status_outbound.assert_called_once_with(
external_issue, False, event.group.project_id
)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == 'b'
assert not GroupResolution.objects.filter(group=group).exists()
activity = Activity.objects.get(
group=group,
type=Activity.SET_REGRESSION,
)
mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
@mock.patch('sentry.models.Group.is_resolved')
def test_unresolves_group_with_auto_resolve(self, mock_is_resolved):
mock_is_resolved.return_value = False
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=1403007314,
)
)
with self.tasks():
event = manager.save(1)
mock_is_resolved.return_value = True
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=1403007414,
)
)
with self.tasks():
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=event.group.id)
assert group.active_at == event2.datetime != event.datetime
def test_invalid_transaction(self):
dict_input = {'messages': 'foo'}
manager = EventManager(make_event(
transaction=dict_input,
))
manager.normalize()
event = manager.save(1)
assert event.transaction is None
def test_transaction_as_culprit(self):
manager = EventManager(make_event(
transaction='foobar',
))
manager.normalize()
event = manager.save(1)
assert event.transaction == 'foobar'
assert event.culprit == 'foobar'
def test_culprit_is_not_transaction(self):
manager = EventManager(make_event(
culprit='foobar',
))
manager.normalize()
event1 = manager.save(1)
assert event1.transaction is None
assert event1.culprit == 'foobar'
def test_transaction_and_culprit(self):
manager = EventManager(make_event(
transaction='foobar',
culprit='baz',
))
manager.normalize()
event1 = manager.save(1)
assert event1.transaction == 'foobar'
assert event1.culprit == 'baz'
def test_first_release(self):
project_id = 1
event = self.make_release_event('1.0', project_id)
group = event.group
assert group.first_release.version == '1.0'
event = self.make_release_event('2.0', project_id)
group = event.group
assert group.first_release.version == '1.0'
def test_release_project_slug(self):
project = self.create_project(name='foo')
release = Release.objects.create(version='foo-1.0', organization=project.organization)
release.add_project(project)
event = self.make_release_event('1.0', project.id)
group = event.group
assert group.first_release.version == 'foo-1.0'
release_tag = [v for k, v in event.tags if k == 'sentry:release'][0]
assert release_tag == 'foo-1.0'
event = self.make_release_event('2.0', project.id)
group = event.group
assert group.first_release.version == 'foo-1.0'
def test_release_project_slug_long(self):
project = self.create_project(name='foo')
partial_version_len = VERSION_LENGTH - 4
release = Release.objects.create(
version='foo-%s' % ('a' * partial_version_len, ), organization=project.organization
)
release.add_project(project)
event = self.make_release_event('a' * partial_version_len, project.id)
group = event.group
assert group.first_release.version == 'foo-%s' % ('a' * partial_version_len, )
release_tag = [v for k, v in event.tags if k == 'sentry:release'][0]
assert release_tag == 'foo-%s' % ('a' * partial_version_len, )
def test_group_release_no_env(self):
project_id = 1
event = self.make_release_event('1.0', project_id)
release = Release.objects.get(version='1.0', projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id,
group_id=event.group_id,
environment='',
).exists()
# ensure we're not erroring on second creation
event = self.make_release_event('1.0', project_id)
def test_group_release_with_env(self):
manager = EventManager(
make_event(release='1.0', environment='prod', event_id='a' * 32)
)
event = manager.save(1)
release = Release.objects.get(version='1.0', projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id,
group_id=event.group_id,
environment='prod',
).exists()
manager = EventManager(
make_event(release='1.0', environment='staging', event_id='b' * 32)
)
event = manager.save(1)
release = Release.objects.get(version='1.0', projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id,
group_id=event.group_id,
environment='staging',
).exists()
def test_tsdb(self):
project = self.project
manager = EventManager(make_event(
fingerprint=['totally unique super duper fingerprint'],
environment='totally unique super duper environment',
))
event = manager.save(project.id)
def query(model, key, **kwargs):
return tsdb.get_sums(model, [key], event.datetime, event.datetime, **kwargs)[key]
assert query(tsdb.models.project, project.id) == 1
assert query(tsdb.models.group, event.group.id) == 1
environment_id = Environment.get_for_organization_id(
event.project.organization_id,
'totally unique super duper environment',
).id
assert query(tsdb.models.project, project.id, environment_id=environment_id) == 1
assert query(tsdb.models.group, event.group.id, environment_id=environment_id) == 1
@pytest.mark.xfail
def test_record_frequencies(self):
project = self.project
manager = EventManager(make_event())
event = manager.save(project.id)
assert tsdb.get_most_frequent(
tsdb.models.frequent_issues_by_project,
(event.project.id, ),
event.datetime,
) == {
event.project.id: [
(event.group_id, 1.0),
],
}
assert tsdb.get_most_frequent(
tsdb.models.frequent_projects_by_organization,
(event.project.organization_id, ),
event.datetime,
) == {
event.project.organization_id: [
(event.project_id, 1.0),
],
}
def test_event_user(self):
manager = EventManager(make_event(
event_id='a',
environment='totally unique environment',
**{'sentry.interfaces.User': {
'id': '1',
}}
))
manager.normalize()
with self.tasks():
event = manager.save(self.project.id)
environment_id = Environment.get_for_organization_id(
event.project.organization_id,
'totally unique environment',
).id
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_group,
(event.group.id, ),
event.datetime,
event.datetime,
) == {
event.group.id: 1,
}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_project,
(event.project.id, ),
event.datetime,
event.datetime,
) == {
event.project.id: 1,
}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_group,
(event.group.id, ),
event.datetime,
event.datetime,
environment_id=environment_id,
) == {
event.group.id: 1,
}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_project,
(event.project.id, ),
event.datetime,
event.datetime,
environment_id=environment_id,
) == {
event.project.id: 1,
}
euser = EventUser.objects.get(
project_id=self.project.id,
ident='1',
)
assert event.get_tag('sentry:user') == euser.tag_value
# ensure event user is mapped to tags in second attempt
manager = EventManager(
make_event(
event_id='b',
**{'sentry.interfaces.User': {
'id': '1',
'name': 'jane',
}}
)
)
manager.normalize()
with self.tasks():
event = manager.save(self.project.id)
euser = EventUser.objects.get(id=euser.id)
assert event.get_tag('sentry:user') == euser.tag_value
assert euser.name == 'jane'
assert euser.ident == '1'
def test_event_user_unicode_identifier(self):
manager = EventManager(make_event(**{'sentry.interfaces.User': {'username': u'foô'}}))
manager.normalize()
with self.tasks():
manager.save(self.project.id)
euser = EventUser.objects.get(
project_id=self.project.id,
)
assert euser.username == u'foô'
def test_environment(self):
manager = EventManager(make_event(**{
'environment': 'beta',
}))
manager.normalize()
event = manager.save(self.project.id)
assert dict(event.tags).get('environment') == 'beta'
def test_invalid_environment(self):
manager = EventManager(make_event(**{
'environment': 'bad/name',
}))
manager.normalize()
event = manager.save(self.project.id)
assert dict(event.tags).get('environment') is None
@mock.patch('sentry.event_manager.eventstream.insert')
def test_group_environment(self, eventstream_insert):
release_version = '1.0'
def save_event():
manager = EventManager(make_event(**{
'event_id': uuid.uuid1().hex, # don't deduplicate
'environment': 'beta',
'release': release_version,
}))
manager.normalize()
return manager.save(self.project.id)
event = save_event()
# Ensure the `GroupEnvironment` record was created.
instance = GroupEnvironment.objects.get(
group_id=event.group_id,
environment_id=Environment.objects.get(
organization_id=self.project.organization_id,
name=event.get_tag('environment'),
).id,
)
assert Release.objects.get(id=instance.first_release_id).version == release_version
# Ensure that the first event in the (group, environment) pair is
# marked as being part of a new environment.
eventstream_insert.assert_called_with(
group=event.group,
event=event,
is_new=True,
is_sample=False,
is_regression=False,
is_new_group_environment=True,
primary_hash='acbd18db4cc2f85cedef654fccc4a4d8',
skip_consume=False,
)
event = save_event()
# Ensure that the next event in the (group, environment) pair is *not*
# marked as being part of a new environment.
eventstream_insert.assert_called_with(
group=event.group,
event=event,
is_new=False,
is_sample=False,
is_regression=None, # XXX: wut
is_new_group_environment=False,
primary_hash='acbd18db4cc2f85cedef654fccc4a4d8',
skip_consume=False,
)
def test_default_fingerprint(self):
manager = EventManager(make_event())
manager.normalize()
event = manager.save(self.project.id)
assert event.data.get('fingerprint') == ['{{ default }}']
def test_user_report_gets_environment(self):
project = self.create_project()
environment = Environment.objects.create(
project_id=project.id,
organization_id=project.organization_id,
name='production',
)
environment.add_project(project)
event_id = 'a' * 32
group = self.create_group(project=project)
UserReport.objects.create(
group=group,
project=project,
event_id=event_id,
name='foo',
email='[email protected]',
comments='It Broke!!!',
)
manager = EventManager(
make_event(
environment=environment.name,
event_id=event_id,
group=group))
manager.normalize()
manager.save(project.id)
assert UserReport.objects.get(event_id=event_id).environment == environment
def test_default_event_type(self):
manager = EventManager(make_event(message='foo bar'))
manager.normalize()
data = manager.get_data()
assert data['type'] == 'default'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'default'
assert group.data.get('metadata') == {
'title': 'foo bar',
}
def test_message_event_type(self):
manager = EventManager(
make_event(
**{
'message': '',
'sentry.interfaces.Message': {
'formatted': 'foo bar',
'message': 'foo %s',
'params': ['bar'],
}
}
)
)
manager.normalize()
data = manager.get_data()
assert data['type'] == 'default'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'default'
assert group.data.get('metadata') == {
'title': 'foo bar',
}
def test_error_event_type(self):
manager = EventManager(
make_event(
**{
'sentry.interfaces.Exception': {
'values': [{
'type': 'Foo',
'value': 'bar',
}],
},
}
)
)
manager.normalize()
data = manager.get_data()
assert data['type'] == 'error'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'error'
assert group.data.get('metadata') == {
'type': 'Foo',
'value': 'bar',
}
def test_csp_event_type(self):
manager = EventManager(
make_event(
**{
'sentry.interfaces.Csp': {
'effective_directive': 'script-src',
'blocked_uri': 'http://example.com',
},
}
)
)
manager.normalize()
data = manager.get_data()
assert data['type'] == 'csp'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'csp'
assert group.data.get('metadata') == {
'directive': 'script-src',
'uri': 'example.com',
'message': "Blocked 'script' from 'example.com'",
}
def test_sdk(self):
manager = EventManager(
make_event(**{
'sdk': {
'name': 'sentry-unity',
'version': '1.0',
},
})
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sdk'] == {
'name': 'sentry-unity',
'version': '1.0',
}
def test_no_message(self):
# test that the message is handled gracefully
manager = EventManager(
make_event(
**{
'message': None,
'sentry.interfaces.Message': {
'message': 'hello world',
},
}
)
)
manager.normalize()
event = manager.save(self.project.id)
assert event.message == 'hello world'
def test_bad_message(self):
# test that the message is handled gracefully
manager = EventManager(make_event(**{
'message': 1234,
}))
manager.normalize()
event = manager.save(self.project.id)
assert event.message == '1234'
assert event.data['sentry.interfaces.Message'] == {
'message': '1234',
}
def test_message_attribute_goes_to_interface(self):
manager = EventManager(make_event(**{
'message': 'hello world',
}))
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sentry.interfaces.Message'] == {
'message': 'hello world',
}
def test_message_attribute_goes_to_formatted(self):
# The combining of 'message' and 'sentry.interfaces.Message' is a bit
# of a compatibility hack, and ideally we would just enforce a stricter
# schema instead of combining them like this.
manager = EventManager(
make_event(
**{
'message': 'world hello',
'sentry.interfaces.Message': {
'message': 'hello world',
},
}
)
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sentry.interfaces.Message'] == {
'message': 'hello world',
'formatted': 'world hello',
}
def test_message_attribute_interface_both_strings(self):
manager = EventManager(
make_event(
**{
'sentry.interfaces.Message': 'a plain string',
'message': 'another string',
}
)
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sentry.interfaces.Message'] == {
'message': 'a plain string',
'formatted': 'another string',
}
def test_throws_when_matches_discarded_hash(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
tombstone = GroupTombstone.objects.create(
project_id=group.project_id,
level=group.level,
message=group.message,
culprit=group.culprit,
data=group.data,
previous_group_id=group.id,
)
GroupHash.objects.filter(
group=group,
).update(
group=None,
group_tombstone_id=tombstone.id,
)
manager = EventManager(
make_event(
message='foo',
event_id='b' * 32,
fingerprint=['a' * 32],
)
)
mock_event_discarded = mock.Mock()
event_discarded.connect(mock_event_discarded)
mock_event_saved = mock.Mock()
event_saved.connect(mock_event_saved)
with self.tasks():
with self.assertRaises(HashDiscarded):
event = manager.save(1)
assert not mock_event_saved.called
assert_mock_called_once_with_partial(
mock_event_discarded,
project=group.project,
sender=EventManager,
signal=event_discarded,
)
def test_event_saved_signal(self):
mock_event_saved = mock.Mock()
event_saved.connect(mock_event_saved)
manager = EventManager(make_event(message='foo'))
manager.normalize()
event = manager.save(1)
assert_mock_called_once_with_partial(
mock_event_saved,
project=event.group.project,
sender=EventManager,
signal=event_saved,
)
def test_checksum_rehashed(self):
checksum = 'invalid checksum hash'
manager = EventManager(
make_event(**{
'checksum': checksum,
})
)
manager.normalize()
event = manager.save(self.project.id)
hashes = [gh.hash for gh in GroupHash.objects.filter(group=event.group)]
assert hashes == [md5_from_hash(checksum), checksum]
@mock.patch('sentry.event_manager.is_valid_error_message')
def test_should_filter_message(self, mock_is_valid_error_message):
TestItem = namedtuple('TestItem', 'value formatted result')
items = [
TestItem(
{'type': 'UnfilteredException'},
'UnfilteredException',
True,
),
TestItem(
{'value': 'This is an unfiltered exception.'},
'This is an unfiltered exception.',
True,
),
TestItem(
{'type': 'UnfilteredException', 'value': 'This is an unfiltered exception.'},
'UnfilteredException: This is an unfiltered exception.',
True,
),
TestItem(
{'type': 'FilteredException', 'value': 'This is a filtered exception.'},
'FilteredException: This is a filtered exception.',
False,
),
]
data = {
'sentry.interfaces.Exception': {
'values': [item.value for item in items]
},
}
manager = EventManager(data, project=self.project)
mock_is_valid_error_message.side_effect = [item.result for item in items]
assert manager.should_filter() == (True, FilterStatKeys.ERROR_MESSAGE)
assert mock_is_valid_error_message.call_args_list == [
mock.call(self.project, item.formatted) for item in items]
class ReleaseIssueTest(TransactionTestCase):
def setUp(self):
self.project = self.create_project()
self.release = Release.get_or_create(self.project, '1.0')
self.environment1 = Environment.get_or_create(self.project, 'prod')
self.environment2 = Environment.get_or_create(self.project, 'staging')
self.timestamp = 1403007314
def make_event(self, **kwargs):
result = {
'event_id': 'a' * 32,
'message': 'foo',
'timestamp': 1403007314.570599,
'level': logging.ERROR,
'logger': 'default',
'tags': [],
}
result.update(kwargs)
return result
def make_release_event(self, release_version='1.0',
environment_name='prod', project_id=1, **kwargs):
event = make_event(
release=release_version,
environment=environment_name,
event_id=uuid.uuid1().hex,
)
event.update(kwargs)
manager = EventManager(event)
with self.tasks():
event = manager.save(project_id)
return event
def convert_timestamp(self, timestamp):
date = datetime.fromtimestamp(timestamp)
date = date.replace(tzinfo=timezone.utc)
return date
def assert_release_project_environment(self, event, new_issues_count, first_seen, last_seen):
release = Release.objects.get(
organization=event.project.organization.id,
version=event.get_tag('sentry:release'),
)
release_project_envs = ReleaseProjectEnvironment.objects.filter(
release=release,
project=event.project,
environment=event.get_environment(),
)
assert len(release_project_envs) == 1
release_project_env = release_project_envs[0]
assert release_project_env.new_issues_count == new_issues_count
assert release_project_env.first_seen == self.convert_timestamp(first_seen)
assert release_project_env.last_seen == self.convert_timestamp(last_seen)
def test_different_groups(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='b' * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=2,
last_seen=self.timestamp + 100,
first_seen=self.timestamp,
)
def test_same_group(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=1,
last_seen=self.timestamp + 100,
first_seen=self.timestamp,
)
def test_same_group_different_environment(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment2.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=1,
last_seen=self.timestamp + 100,
first_seen=self.timestamp + 100,
)
|
py | 1a4be6af6b13279e5091f2da4ff7174bb62f7fe4 | try:
from django.contrib.auth import get_user_model as auth_get_user_model
except ImportError:
auth_get_user_model = None
from django.contrib.auth.models import User
from account.conf import settings
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")
def get_user_model(*args, **kwargs):
if auth_get_user_model is not None:
return auth_get_user_model(*args, **kwargs)
else:
return User
def get_user_lookup_kwargs(kwargs):
result = {}
username_field = getattr(get_user_model(), "USERNAME_FIELD", "username")
    for key, value in kwargs.items():  # .items() keeps this working on both Python 2 and 3
result[key.format(username=username_field)] = value
return result
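# Illustrative usage (editor's addition, not part of the original module): the
# incoming keys are format strings with a "{username}" placeholder, which is
# replaced by the user model's USERNAME_FIELD. Assuming the default User model:
#
#   get_user_lookup_kwargs({"{username}__iexact": "alice"})
#   # -> {"username__iexact": "alice"}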
|
py | 1a4be7e35e32d612ae500ba5f92a9b048bf5b04a | from ...Core.registers import Registers
from ...Core.commands import Commands
from ...Core.types import Types
from ...Runtime.gc import GC
""" Map: arithmetic operator in programming language = arithmetic operator in ASM """
binop_compare_map = {
'+': {
'operator': Commands.ADD,
'operands': [Registers.EAX, Registers.EBX]
},
'-': {
'operator': Commands.SUB,
'operands': [Registers.EAX, Registers.EBX]
},
'*': {
'operator': Commands.MUL,
'operands': [Registers.EBX]
},
'/': {
'operator': Commands.IDIV,
'operands': [Registers.EBX]
},
'%': {
'operator': Commands.IDIV,
'operands': [Registers.EBX]
}
}
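# Editor's note (illustrative, not from the original source): binop_aexp below
# pops both operands and then emits the mapped instruction, so "a + b" compiles
# to roughly the following sequence:
#
#   POP EBX
#   POP EAX
#   ADD EAX, EBX
#   PUSH EAX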
def int_aexp(compiler, node):
""" Integer compilation """
compiler.code.add(Commands.MOV, [Registers.EAX, node.i])\
.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.INT)
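# Editor's note: every expression handler follows the same convention as
# int_aexp above - the result is left pushed on the stack - which is why
# binop_aexp below can simply POP its two operands.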
def binop_aexp(compiler, node):
""" Arithmetic expression compilation """
node.left.compile_asm(compiler)
compiler.types.pop()
node.right.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EBX)\
.add(Commands.POP, Registers.EAX)
if node.op == '/' or node.op == '%':
compiler.code.add(Commands.CDQ)
compiler.code.add(binop_compare_map[node.op]['operator'], binop_compare_map[node.op]['operands'])
if node.op == '%':
compiler.code.add(Commands.MOV, [Registers.EAX, Registers.EDX])
compiler.code.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.INT)
def var_aexp(compiler, node):
""" Variable compilation """
if node.context == 'assign':
gc = GC(compiler)
if compiler.environment.is_exist_local_var(node.name):
var = compiler.environment.get_local_var(node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s]' % Registers.ESP])
compiler.code.add(Commands.MOV, [var_type, Registers.EAX])
compiler.environment.update_local_var_type(node.name, node.type)
compiler.code.add(Commands.MOV, [Registers.EAX, var])
compiler.code.add(Commands.MOV, [Registers.EBX, var_type])
gc.decrement()
else:
var = compiler.environment.add_local_var(node.type, node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
if compiler.environment.defined_object is not None:
compiler.environment.set_link_object(var, compiler.environment.defined_object)
compiler.environment.defined_object = None
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s + 4]' % Registers.ESP])
compiler.code.add(Commands.MOV, [Registers.EBX, 'dword [%s]' % Registers.ESP])
gc.increment()
compiler.code.add(Commands.POP, var_type)
compiler.code.add(Commands.POP, var)
else:
compiler.code.add(Commands.MOV, [Registers.EAX, compiler.environment.get_local_var(node.name)])\
.add(Commands.PUSH, Registers.EAX)
runtime_var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.types.set(runtime_var_type)
var_type = compiler.environment.get_local_var_type(node.name)
return var_type
|
py | 1a4be8720a499967e042c0d3a15c95f0a66fdb4e | from django.db import models
from django.urls import reverse
class Post(models.Model):
title = models.CharField(
verbose_name='title',
max_length=255,
help_text="The page title as you'd like it to be seen by the public",
)
body = models.TextField(
verbose_name='content body',
)
created = models.DateTimeField(
verbose_name='created time',
auto_now_add=True,
)
class Meta:
ordering = ['-created']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', args=[self.pk, ])
class AbstractAttachment(models.Model):
name = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name='file name',
help_text="Defaults to filename, if left blank",
)
file = models.ImageField(
verbose_name='uploaded file',
upload_to="attachment",
)
created = models.DateTimeField(
verbose_name='created time',
auto_now_add=True,
)
class Meta:
abstract = True
class Attachment(AbstractAttachment):
post = models.ForeignKey(
'fileupload.Post',
verbose_name='post',
related_name='attachments',
on_delete=models.CASCADE,
blank=True,
null=True,
)
class Meta:
verbose_name = 'attachment'
verbose_name_plural = 'attachments'
ordering = ['-created']
def __str__(self):
        return self.name or self.file.name  # name may be blank; fall back to the uploaded file's name
|
py | 1a4be908d7b1808870e195fa97af99f5a257a11d | from pbpstats.resources.enhanced_pbp import FieldGoal
from pbpstats.resources.enhanced_pbp.stats_nba.enhanced_pbp_item import (
StatsEnhancedPbpItem,
)
class StatsFieldGoal(FieldGoal, StatsEnhancedPbpItem):
"""
Class for field goal events
"""
event_type = [1, 2]
def __init__(self, *args):
super().__init__(*args)
@property
def is_made(self):
"""
returns True if shot was made, False otherwise
"""
return self.event_type == 1
@property
def shot_value(self):
"""
returns 3 if shot is a 3 point attempt, 2 otherwise
"""
return 3 if " 3PT " in self.description else 2
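    # Illustrative examples (hypothetical description strings, editor's addition):
    #   "Smith 26' 3PT Jump Shot"  -> shot_value == 3
    #   "Smith Driving Layup Shot" -> shot_value == 2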
def get_offense_team_id(self):
"""
returns team id that took the shot
"""
return self.team_id
|
py | 1a4be914e75fbda4e1af232dc3e82ca2d77e53b1 | from tkinter import *
import random
class Window:
def __init__(self, master):
self.master = master
self.guess_number = None
self.cows = 0
self.bulls = 0
master.title("Bulls and Cows")
self.label = Label(master, text="Let`s play Bulls and Cows game!")
self.label.grid(row=0, column=0, columnspan=2, sticky=W + E)
self.startBut = Button(master, text="Start game", command=self.start, state=NORMAL)
self.startBut.grid(row=1, column=0)
self.closeBut = Button(master, text="Close", command=master.quit)
self.closeBut.grid(row=1, column=2)
self.helpBut = Button(master, text='help', command=self.help)
self.helpBut.grid(row=1, column=1)
vcmd = master.register(self.str_checking) # we have to wrap the command
self.entry = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
def start(self):
g_numb = []
while len(g_numb) <= 3:
rand_numb = random.randint(0, 9)
if rand_numb not in g_numb: g_numb.append(rand_numb)
self.g_numb = g_numb
vcmd = self.master.register(self.str_checking) # we have to wrap the command
self.entry = Entry(self.master, validate="key", validatecommand=(vcmd, '%P'))
self.entry.grid(row=2, column=0, columnspan=2, sticky=W + E)
print(g_numb)
self.comBut = Button(self.master, text='Try It', command=self.bulls_cows)
self.comBut.grid(row=3, column=0, columnspan=2, sticky=W + E)
self.startBut.configure(state=DISABLED)
return g_numb
def str_checking(self, input_numbers):
if not input_numbers:
self.guess_number = None
return True
try:
guess_number = int(input_numbers)
            if 0 <= guess_number <= 9876:  # 9876 is the largest 4-digit number with all digits distinct
self.guess_number = guess_number
return True
else:
return False
except ValueError:
return False
def bulls_cows(self):
        print(self.guess_number)
        if self.guess_number is None:
            return
        if not isinstance(self.guess_number, list):
            self.guess_number = [int(number) for number in str(self.guess_number)]
        if len(self.guess_number) != 4:
            return  # the positional comparison below assumes exactly four digits
        self.cows = 0
for tip_i in self.guess_number:
for guess_i in self.g_numb:
if tip_i == guess_i:
self.cows += 1
self.bulls = 0
for index, _ in enumerate(self.g_numb):
if self.guess_number[index] == self.g_numb[index]:
self.bulls += 1
self.cows -= 1
if self.bulls == 4:
self.message = "Congratulations, You guessed all 4 bulls!\n" \
"Do you want play another game?"
self.resBut = Button(self.master, text='Play again', command=self.reset)
self.resBut.grid(row=5, column=0, columnspan=2, sticky=W + E)
else:
self.message = f"You guessed {self.bulls} bulls and {self.cows} cows."
self.label_text = StringVar()
self.label_text.set(self.message)
self.label = Label(self.master, textvariable=self.label_text)
self.label.grid(row=4, column=0, columnspan=2, sticky=W + E)
def reset(self):
self.message = ""
self.label_text.set(self.message)
self.start()
def help(self):
help_win = Toplevel(root)
help_win.title('Manual')
help_win.geometry("640x400")
display = Label(help_win, text="""The numerical version of the game is usually played with 4 digits, but can
also be played with 3 or any other number of digits.\n
On a sheet of paper, the players each write a 4-digit secret number. The digits must be all different. Then, in turn,
the players try to guess their opponent's number who gives the number of matches. If the matching digits are in their
right positions, they are "bulls", if in different positions, they are "cows". Example:\n
Secret number: 4271\n
Opponent's try: 1234\n
Answer: 1 bull and 2 cows. (The bull is "2", the cows are "4" and "1".)\n
The first one to reveal the other's secret number in the least number of guesses wins the game.\n
The game may also be played by two teams of players, with the team members discussing their strategy\n
before selecting a move.
A computer program moo, written in 1970 by J. M. Grochow at MIT in the PL/I computer language for the Multics \n
operating system, was amongst the first Bulls and Cows computer implementations, inspired by a similar program written \n
by Frank King in 1968 and running on the Cambridge University mainframe. Because the game has simple rules, \n
while it is difficult and entertaining, there are many computer variants; it is often included in telephones and PDAs.
It is proven that any number could be solved within seven turns. \n
Minimal average game length is 26274/5040=5.2131 turns
https://en.wikipedia.org/wiki/Bulls_and_Cows""")
display.pack()
root = Tk()
app = Window(root)
root.mainloop()
|
py | 1a4bea274aca87f35a2e6c36ca14e6bf94a770dc | import unittest
import os
from checkov.json_doc.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestRunnerValid(unittest.TestCase):
def test_runner_object_failing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = os.path.join(current_dir, "resources", "object", "fail")
checks_dir = os.path.join(current_dir, "checks", "object")
runner = Runner()
checks = ["CKV_FOO_1", "CKV_FOO_2"]
report = runner.run(
root_folder=valid_dir_path,
external_checks_dir=[checks_dir],
runner_filter=RunnerFilter(framework='all', checks=checks)
)
self.assertEqual(len(report.failed_checks), 3)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(len(report.passed_checks), 2)
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_object_passing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = os.path.join(current_dir, "resources", "object", "pass")
checks_dir = os.path.join(current_dir, "checks", "object")
runner = Runner()
report = runner.run(
root_folder=valid_dir_path,
external_checks_dir=[checks_dir],
runner_filter=RunnerFilter(framework="all", checks=["CKV_FOO_1"]),
)
self.assertEqual(len(report.passed_checks), 1)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.failed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_array_failing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = os.path.join(current_dir, "resources", "array", "fail")
checks_dir = os.path.join(current_dir, "checks", "array")
runner = Runner()
report = runner.run(
root_folder=valid_dir_path,
external_checks_dir=[checks_dir],
runner_filter=RunnerFilter(framework='all', checks=["CKV_BARBAZ_1"])
)
self.assertEqual(len(report.failed_checks), 2)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_array_passing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = os.path.join(current_dir, "resources", "array", "pass")
checks_dir = os.path.join(current_dir, "checks", "array")
runner = Runner()
report = runner.run(
root_folder=valid_dir_path,
external_checks_dir=[checks_dir],
runner_filter=RunnerFilter(framework="all", checks=["CKV_BARBAZ_1"]),
)
self.assertEqual(len(report.passed_checks), 1)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.failed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_complex_failing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = os.path.join(current_dir, "resources", "complex", "fail")
checks_dir = os.path.join(current_dir, "checks", "complex")
runner = Runner()
report = runner.run(
root_folder=valid_dir_path,
external_checks_dir=[checks_dir],
runner_filter=RunnerFilter(framework='all', checks=["CKV_COMPLEX_1"])
)
self.assertEqual(len(report.failed_checks), 1)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_complex_passing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = os.path.join(current_dir, "resources", "complex", "pass")
checks_dir = os.path.join(current_dir, "checks", "complex")
runner = Runner()
report = runner.run(
root_folder=valid_dir_path,
external_checks_dir=[checks_dir],
runner_filter=RunnerFilter(framework="all", checks=["CKV_COMPLEX_1"]),
)
self.assertEqual(len(report.passed_checks), 1)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.failed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
if __name__ == "__main__":
unittest.main()
|
py | 1a4beb709b086d7a28c7e8325aa887022b3a9144 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.amount_v30_rc2 import AmountV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.created_date_v30_rc2 import CreatedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30_rc2 import ExternalIDsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.funding_contributors_v30_rc2 import FundingContributorsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.funding_title_v30_rc2 import FundingTitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30_rc2 import FuzzyDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc2 import LastModifiedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.organization_defined_funding_sub_type_v30_rc2 import OrganizationDefinedFundingSubTypeV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.organization_v30_rc2 import OrganizationV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.source_v30_rc2 import SourceV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc2 import UrlV30Rc2 # noqa: F401,E501
class FundingV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30Rc2',
'last_modified_date': 'LastModifiedDateV30Rc2',
'source': 'SourceV30Rc2',
'put_code': 'int',
'path': 'str',
'type': 'str',
'organization_defined_type': 'OrganizationDefinedFundingSubTypeV30Rc2',
'title': 'FundingTitleV30Rc2',
'short_description': 'str',
'amount': 'AmountV30Rc2',
'url': 'UrlV30Rc2',
'start_date': 'FuzzyDateV30Rc2',
'end_date': 'FuzzyDateV30Rc2',
'external_ids': 'ExternalIDsV30Rc2',
'contributors': 'FundingContributorsV30Rc2',
'organization': 'OrganizationV30Rc2',
'visibility': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'path': 'path',
'type': 'type',
'organization_defined_type': 'organization-defined-type',
'title': 'title',
'short_description': 'short-description',
'amount': 'amount',
'url': 'url',
'start_date': 'start-date',
'end_date': 'end-date',
'external_ids': 'external-ids',
'contributors': 'contributors',
'organization': 'organization',
'visibility': 'visibility'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, type=None, organization_defined_type=None, title=None, short_description=None, amount=None, url=None, start_date=None, end_date=None, external_ids=None, contributors=None, organization=None, visibility=None): # noqa: E501
"""FundingV30Rc2 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._path = None
self._type = None
self._organization_defined_type = None
self._title = None
self._short_description = None
self._amount = None
self._url = None
self._start_date = None
self._end_date = None
self._external_ids = None
self._contributors = None
self._organization = None
self._visibility = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if path is not None:
self.path = path
self.type = type
if organization_defined_type is not None:
self.organization_defined_type = organization_defined_type
self.title = title
if short_description is not None:
self.short_description = short_description
if amount is not None:
self.amount = amount
if url is not None:
self.url = url
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if external_ids is not None:
self.external_ids = external_ids
if contributors is not None:
self.contributors = contributors
self.organization = organization
if visibility is not None:
self.visibility = visibility
@property
def created_date(self):
"""Gets the created_date of this FundingV30Rc2. # noqa: E501
:return: The created_date of this FundingV30Rc2. # noqa: E501
:rtype: CreatedDateV30Rc2
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this FundingV30Rc2.
:param created_date: The created_date of this FundingV30Rc2. # noqa: E501
:type: CreatedDateV30Rc2
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this FundingV30Rc2. # noqa: E501
:return: The last_modified_date of this FundingV30Rc2. # noqa: E501
:rtype: LastModifiedDateV30Rc2
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this FundingV30Rc2.
:param last_modified_date: The last_modified_date of this FundingV30Rc2. # noqa: E501
:type: LastModifiedDateV30Rc2
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this FundingV30Rc2. # noqa: E501
:return: The source of this FundingV30Rc2. # noqa: E501
:rtype: SourceV30Rc2
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this FundingV30Rc2.
:param source: The source of this FundingV30Rc2. # noqa: E501
:type: SourceV30Rc2
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this FundingV30Rc2. # noqa: E501
:return: The put_code of this FundingV30Rc2. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this FundingV30Rc2.
:param put_code: The put_code of this FundingV30Rc2. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def path(self):
"""Gets the path of this FundingV30Rc2. # noqa: E501
:return: The path of this FundingV30Rc2. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this FundingV30Rc2.
:param path: The path of this FundingV30Rc2. # noqa: E501
:type: str
"""
self._path = path
@property
def type(self):
"""Gets the type of this FundingV30Rc2. # noqa: E501
:return: The type of this FundingV30Rc2. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this FundingV30Rc2.
:param type: The type of this FundingV30Rc2. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["GRANT", "CONTRACT", "AWARD", "SALARY_AWARD"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def organization_defined_type(self):
"""Gets the organization_defined_type of this FundingV30Rc2. # noqa: E501
:return: The organization_defined_type of this FundingV30Rc2. # noqa: E501
:rtype: OrganizationDefinedFundingSubTypeV30Rc2
"""
return self._organization_defined_type
@organization_defined_type.setter
def organization_defined_type(self, organization_defined_type):
"""Sets the organization_defined_type of this FundingV30Rc2.
:param organization_defined_type: The organization_defined_type of this FundingV30Rc2. # noqa: E501
:type: OrganizationDefinedFundingSubTypeV30Rc2
"""
self._organization_defined_type = organization_defined_type
@property
def title(self):
"""Gets the title of this FundingV30Rc2. # noqa: E501
:return: The title of this FundingV30Rc2. # noqa: E501
:rtype: FundingTitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this FundingV30Rc2.
:param title: The title of this FundingV30Rc2. # noqa: E501
:type: FundingTitleV30Rc2
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def short_description(self):
"""Gets the short_description of this FundingV30Rc2. # noqa: E501
:return: The short_description of this FundingV30Rc2. # noqa: E501
:rtype: str
"""
return self._short_description
@short_description.setter
def short_description(self, short_description):
"""Sets the short_description of this FundingV30Rc2.
:param short_description: The short_description of this FundingV30Rc2. # noqa: E501
:type: str
"""
self._short_description = short_description
@property
def amount(self):
"""Gets the amount of this FundingV30Rc2. # noqa: E501
:return: The amount of this FundingV30Rc2. # noqa: E501
:rtype: AmountV30Rc2
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this FundingV30Rc2.
:param amount: The amount of this FundingV30Rc2. # noqa: E501
:type: AmountV30Rc2
"""
self._amount = amount
@property
def url(self):
"""Gets the url of this FundingV30Rc2. # noqa: E501
:return: The url of this FundingV30Rc2. # noqa: E501
:rtype: UrlV30Rc2
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this FundingV30Rc2.
:param url: The url of this FundingV30Rc2. # noqa: E501
:type: UrlV30Rc2
"""
self._url = url
@property
def start_date(self):
"""Gets the start_date of this FundingV30Rc2. # noqa: E501
:return: The start_date of this FundingV30Rc2. # noqa: E501
:rtype: FuzzyDateV30Rc2
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this FundingV30Rc2.
:param start_date: The start_date of this FundingV30Rc2. # noqa: E501
:type: FuzzyDateV30Rc2
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this FundingV30Rc2. # noqa: E501
:return: The end_date of this FundingV30Rc2. # noqa: E501
:rtype: FuzzyDateV30Rc2
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this FundingV30Rc2.
:param end_date: The end_date of this FundingV30Rc2. # noqa: E501
:type: FuzzyDateV30Rc2
"""
self._end_date = end_date
@property
def external_ids(self):
"""Gets the external_ids of this FundingV30Rc2. # noqa: E501
:return: The external_ids of this FundingV30Rc2. # noqa: E501
:rtype: ExternalIDsV30Rc2
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this FundingV30Rc2.
:param external_ids: The external_ids of this FundingV30Rc2. # noqa: E501
:type: ExternalIDsV30Rc2
"""
self._external_ids = external_ids
@property
def contributors(self):
"""Gets the contributors of this FundingV30Rc2. # noqa: E501
:return: The contributors of this FundingV30Rc2. # noqa: E501
:rtype: FundingContributorsV30Rc2
"""
return self._contributors
@contributors.setter
def contributors(self, contributors):
"""Sets the contributors of this FundingV30Rc2.
:param contributors: The contributors of this FundingV30Rc2. # noqa: E501
:type: FundingContributorsV30Rc2
"""
self._contributors = contributors
@property
def organization(self):
"""Gets the organization of this FundingV30Rc2. # noqa: E501
:return: The organization of this FundingV30Rc2. # noqa: E501
:rtype: OrganizationV30Rc2
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this FundingV30Rc2.
:param organization: The organization of this FundingV30Rc2. # noqa: E501
:type: OrganizationV30Rc2
"""
if organization is None:
raise ValueError("Invalid value for `organization`, must not be `None`") # noqa: E501
self._organization = organization
@property
def visibility(self):
"""Gets the visibility of this FundingV30Rc2. # noqa: E501
:return: The visibility of this FundingV30Rc2. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this FundingV30Rc2.
:param visibility: The visibility of this FundingV30Rc2. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FundingV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FundingV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a4bebb3bb8542a8908ce457c79e1e3247dc3c98 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Callable
import kfp
from kfp import LocalClient, run_pipeline_func_locally
InputPath = kfp.components.InputPath()
OutputPath = kfp.components.OutputPath()
BASE_IMAGE = "python:3.7"
def light_component(
base_image: str = BASE_IMAGE,
):
"""Decorator of kfp light component with customized parameters
Usage:
```python
@light_component(base_image="python:3.7")
def a_component(src: kfp.components.InputPath(), ...):
...
```
"""
def wrapper(func: Callable):
return kfp.components.create_component_from_func(
func=func,
base_image=base_image,
)
return wrapper
@light_component()
def hello(name: str):
print(f"hello {name}")
@light_component()
def local_loader(src: str, dst: kfp.components.OutputPath()):
import os
import shutil
if os.path.exists(src):
shutil.copyfile(src, dst)
@light_component()
def flip_coin(dst: kfp.components.OutputPath()):
import random
result = "head" if random.randint(0, 1) == 0 else "tail"
with open(dst, "w") as f:
f.write(result)
@light_component()
def list(dst: kfp.components.OutputPath()):
import json
with open(dst, "w") as f:
json.dump(["hello", "world", "kfp"], f)
@light_component()
def component_connect_demo(
src: kfp.components.InputPath(), dst: kfp.components.OutputPath()
):
with open(src, "r") as f:
line = f.readline()
print(f"read first line: {line}")
with open(dst, "w") as fw:
fw.write(f"{line} copied")
class LocalRunnerTest(unittest.TestCase):
def setUp(self):
import tempfile
with tempfile.NamedTemporaryFile('w', delete=False) as f:
self.temp_file_path = f.name
f.write("hello world")
def test_run_local(self):
def _pipeline(name: str):
hello(name)
run_pipeline_func_locally(
_pipeline,
{"name": "world"},
execution_mode=LocalClient.ExecutionMode("local"),
)
def test_local_file(self):
def _pipeline(file_path: str):
local_loader(file_path)
run_result = run_pipeline_func_locally(
_pipeline,
{"file_path": self.temp_file_path},
execution_mode=LocalClient.ExecutionMode("local"),
)
output_file_path = run_result.get_output_file("local-loader")
with open(output_file_path, "r") as f:
line = f.readline()
assert "hello" in line
def test_condition(self):
def _pipeline():
_flip = flip_coin()
with kfp.dsl.Condition(_flip.output == "head"):
hello("head")
with kfp.dsl.Condition(_flip.output == "tail"):
hello("tail")
run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
def test_for(self):
@light_component()
def cat(item, dst: OutputPath):
with open(dst, "w") as f:
f.write(item)
def _pipeline():
with kfp.dsl.ParallelFor(list().output) as item:
cat(item)
run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
def test_connect(self):
def _pipeline():
_local_loader = local_loader(self.temp_file_path)
component_connect_demo(_local_loader.output)
run_result = run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
output_file_path = run_result.get_output_file("component-connect-demo")
with open(output_file_path, "r") as f:
line = f.readline()
assert "copied" in line
def test_command_argument_in_any_format(self):
def echo():
return kfp.dsl.ContainerOp(
name="echo",
image=BASE_IMAGE,
command=["echo", "hello world", ">", "/tmp/outputs/output_file"],
arguments=[],
file_outputs={"output": "/tmp/outputs/output_file"},
)
def _pipeline():
_echo = echo()
component_connect_demo(_echo.output)
run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
@unittest.skip('docker is not installed in CI environment.')
def test_execution_mode_exclude_op(self):
@light_component(base_image="image_not_exist")
def cat_on_image_not_exist(name: str, dst: OutputPath):
with open(dst, "w") as f:
f.write(name)
def _pipeline():
cat_on_image_not_exist("exclude ops")
run_result = run_pipeline_func_locally(
_pipeline,
{},
execution_mode=LocalClient.ExecutionMode(mode="docker"),
)
output_file_path = run_result.get_output_file("cat-on-image-not-exist")
import os
assert not os.path.exists(output_file_path)
run_result = run_pipeline_func_locally(
_pipeline,
{},
execution_mode=LocalClient.ExecutionMode(
mode="docker", ops_to_exclude=["cat-on-image-not-exist"]
),
)
output_file_path = run_result.get_output_file("cat-on-image-not-exist")
with open(output_file_path, "r") as f:
line = f.readline()
assert "exclude ops" in line
|
py | 1a4becb49fe7372ed0ed3cc372677f2ee31afd0a | from unittest import TestCase
from evalytics.config import Config
from tests.common.mocks import MockConfigReader
class ConfigSut(Config, MockConfigReader):
'Injecting a mock into the Config dependency'
class TestProvidersConfig(TestCase):
def setUp(self):
self.sut = ConfigSut()
def test_read_storage_provider(self):
provider = self.sut.read_storage_provider()
self.assertEqual('storage-provider', provider)
def test_read_communication_channel_provider(self):
provider = self.sut.read_communication_channel_provider()
self.assertEqual('comm-provider', provider)
def test_read_forms_platform_provider(self):
provider = self.sut.read_forms_platform_provider()
self.assertEqual('form-provider', provider)
class TestEvalProcessConfig(TestCase):
def setUp(self):
self.sut = ConfigSut()
def test_read_eval_process_id(self):
eval_process_id = self.sut.read_eval_process_id()
self.assertEqual('eval_process_id', eval_process_id)
def test_read_eval_process_due_date(self):
eval_process_due_date = self.sut.read_eval_process_due_date()
self.assertEqual('eval_process_due_date', eval_process_due_date)
def test_read_is_add_comenter_to_eval_reports_enabled(self):
is_add_comenter_to_eval_reports_enabled = self.sut.read_is_add_comenter_to_eval_reports_enabled()
self.assertEqual(False, is_add_comenter_to_eval_reports_enabled)
class TestSlackProviderConfig(TestCase):
def setUp(self):
self.sut = ConfigSut()
def test_get_slack_token(self):
slack_token = self.sut.get_slack_token()
self.assertEqual("TOKEN::TOKEN", slack_token)
def test_get_slack_channel_param(self):
slack_channel_param = self.sut.get_slack_channel_param()
self.assertEqual('@{}', slack_channel_param)
def test_slack_message_is_direct(self):
slack_message_is_direct = self.sut.slack_message_is_direct()
self.assertEqual(True, slack_message_is_direct)
def test_slack_message_as_user_param(self):
slack_message_as_user_param = self.sut.slack_message_as_user_param()
self.assertEqual(True, slack_message_as_user_param)
def test_get_slack_users_map(self):
slack_message_as_user_param = self.sut.get_slack_users_map()
self.assertEqual({}, slack_message_as_user_param)
class TestGmailProviderConfig(TestCase):
def setUp(self):
self.sut = ConfigSut()
def test_read_mail_subject(self):
mail_subject = self.sut.read_mail_subject()
self.assertEqual('this is the mail subject', mail_subject)
def test_read_reminder_mail_subject(self):
reminder_mail_subject = self.sut.read_reminder_mail_subject()
self.assertEqual('reminder subject', reminder_mail_subject)
class TestGoogleDriveProviderConfig(TestCase):
def setUp(self):
self.sut = ConfigSut()
def test_read_google_folder(self):
google_folder = self.sut.read_google_folder()
self.assertEqual('mock_folder', google_folder)
def test_read_assignments_folder(self):
google_folder = self.sut.read_assignments_folder()
self.assertEqual('mock_assignments_folder', google_folder)
def test_read_assignments_manager_forms_folder(self):
google_folder = self.sut.read_assignments_manager_forms_folder()
self.assertEqual('mock_man_ssignments_folder', google_folder)
def test_read_google_orgchart(self):
orgchart = self.sut.read_google_orgchart()
self.assertEqual('mock_orgchart', orgchart)
def test_read_google_orgchart_range(self):
# given:
expected_value = 'A1:A1'
# when:
value = self.sut.read_google_orgchart_range()
# then:
self.assertEqual(expected_value, value)
def test_read_google_form_map(self):
formmap = self.sut.read_google_form_map()
self.assertEqual('mock_formmap', formmap)
def test_read_google_form_map_range(self):
# given:
expected_value = 'A1:A1'
# when:
value = self.sut.read_google_form_map_range()
# then:
self.assertEqual(expected_value, value)
def test_read_assignments_peers_file(self):
formmap = self.sut.read_assignments_peers_file()
self.assertEqual('assignments_peers_file', formmap)
def test_read_assignments_peers_range(self):
# given:
expected_value = 'A1:A1'
# when:
value = self.sut.read_assignments_peers_range()
# then:
self.assertEqual(expected_value, value)
def test_read_google_responses_folder(self):
tests_folder = self.sut.read_google_responses_folder()
self.assertEqual('mock_tests_folder', tests_folder)
def test_read_google_responses_files_range(self):
google_responses_files_range = self.sut.read_google_responses_files_range()
self.assertEqual('A1:A1', google_responses_files_range)
def test_read_eval_reports_folder(self):
tests_folder = self.sut.read_eval_reports_folder()
self.assertEqual('eval_reports_folder', tests_folder)
def test_read_google_eval_report_template_id(self):
template_id = self.sut.read_google_eval_report_template_id()
self.assertEqual('ID', template_id)
def test_read_google_eval_report_prefix(self):
prefix = self.sut.read_google_eval_report_prefix()
self.assertEqual('Prefix', prefix)
def test_read_google_manager_eval_by_report_prefix(self):
# given:
expected_value = 'Manager Evaluation By Report'
# when:
value = self.sut.read_google_manager_eval_by_report_prefix()
# then:
self.assertEqual(expected_value, value)
def test_read_google_report_eval_by_manager_prefix(self):
# given:
expected_value = 'Report Evaluation By Manager'
# when:
value = self.sut.read_google_report_eval_by_manager_prefix()
# then:
self.assertEqual(expected_value, value)
def test_read_google_peer_eval_by_peer_prefix(self):
# given:
expected_value = 'Peer Evaluation By Peer'
# when:
value = self.sut.read_google_peer_eval_by_peer_prefix()
# then:
self.assertEqual(expected_value, value)
def test_read_google_self_eval_prefix(self):
# given:
expected_value = 'Self Evaluation'
# when:
value = self.sut.read_google_self_eval_prefix()
# then:
self.assertEqual(expected_value, value)
class TestConfig(TestCase):
def setUp(self):
self.sut = ConfigSut()
def test_read_company_domain(self):
domain = self.sut.read_company_domain()
self.assertEqual('mock_domain.com', domain)
def test_read_company_number_of_employees(self):
number_of_employees = self.sut.read_company_number_of_employees()
self.assertEqual(20, number_of_employees)
|
py | 1a4beccaa86f1f197d69f98d320ad541b80ffa2d | # -*- encoding: utf-8 -*-
import re
import sys
from svb.multipart_data_generator import MultipartDataGenerator
from svb.test.helper import SvbTestCase
class MultipartDataGeneratorTests(SvbTestCase):
def run_test_multipart_data_with_file(self, test_file):
params = {
"key1": b"ASCII value",
"key2": u"Üñìçôdé value",
"key3": test_file
}
generator = MultipartDataGenerator()
generator.add_params(params)
http_body = generator.get_post_data()
if sys.version_info >= (3,):
http_body = http_body.decode('utf-8')
self.assertTrue(re.search(
r"Content-Disposition: form-data; name=\"key1\"", http_body))
self.assertTrue(re.search(r"ASCII value", http_body))
self.assertTrue(re.search(
r"Content-Disposition: form-data; name=\"key2\"", http_body))
self.assertTrue(re.search(r"Üñìçôdé value", http_body))
self.assertTrue(re.search(
r"Content-Disposition: form-data; name=\"key3\"; "
r"filename=\".+\"",
http_body))
self.assertTrue(re.search(
r"Content-Type: application/octet-stream", http_body))
test_file.seek(0)
file_contents = test_file.read()
if sys.version_info >= (3,) and isinstance(file_contents, bytes):
file_contents = file_contents.decode('utf-8')
self.assertNotEqual(-1, http_body.find(file_contents))
def test_multipart_data_file_text(self):
with open(__file__, mode='r') as test_file:
self.run_test_multipart_data_with_file(test_file)
def test_multipart_data_file_binary(self):
with open(__file__, mode='rb') as test_file:
self.run_test_multipart_data_with_file(test_file)
|
py | 1a4bed2fadc8de265844f7024f967b78ef5ad90f | from django.db import models
from cms.models import CMSPlugin
from djangocms_attributes_field.fields import AttributesField
class TestPlugin(CMSPlugin):
label = models.CharField(
verbose_name="Test app label",
max_length=255,
)
attributes1 = AttributesField()
attributes2 = AttributesField(
excluded_keys=["style", "src"],
)
|
py | 1a4bee7618085e46f194c2054b95d80fdbee16c6 | import plotly.graph_objects as go
def pad_list(l, n):
pad = [1] * (n - len(l))
return l + pad
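# Quick illustration (editorial note, not in the original script): pad_list right-pads a
# list with 1s up to length n, e.g. pad_list([5, 3], 4) -> [5, 3, 1, 1]; a list that is
# already at least n elements long is returned unchanged.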
def overlaid_area(df, x_column, y_column, filename, category):
df = df.sort_values(x_column)
dose_1 = df[df[category] == 'Primeira dose']
x_dose_1 = dose_1[x_column].tolist()
y_dose_1 = dose_1[y_column].cumsum()
completa = df[df[category] == 'Vacinação completa']
x_completa = completa[x_column].tolist()
y_completa = completa[y_column].cumsum()
azul_escuro = "rgb(0, 102, 255)"
azul = "rgb(102, 204, 255)"
fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_dose_1, y=y_dose_1, fill='tozeroy', legendgroup='a', name='Primeira dose', fillcolor=azul))  # fill down to the x-axis
    fig.add_trace(go.Scatter(x=x_completa, y=y_completa, fill='tozeroy', legendgroup='a', name='Vacinação completa', fillcolor=azul_escuro))  # also fills down to the x-axis, drawn on top
fig.update_layout(
showlegend=True,
xaxis_type='category',
xaxis_title="Semana - início em 17/01/2021 até 20/06/2021",
yaxis_title="Total de vacinas aplicadas",
font=dict(
size=25))
return fig
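# Hedged usage sketch (not part of the original script): overlaid_area expects a pandas
# DataFrame whose category column contains the exact labels 'Primeira dose' and
# 'Vacinação completa'; the filename argument is accepted but never used. Column names
# below are illustrative only.
#
# import pandas as pd
# df = pd.DataFrame({
#     'semana': [1, 1, 2, 2],
#     'doses': [100, 40, 150, 80],
#     'tipo': ['Primeira dose', 'Vacinação completa'] * 2,
# })
# fig = overlaid_area(df, 'semana', 'doses', filename=None, category='tipo')
# fig.show()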
|
py | 1a4bee99a600ae6168311081d859fcac5811b665 | from gluoncv.data import COCOInstance, COCOSegmentation
from pycocotools.coco import COCO
import numpy as np
from PIL import Image, ImageOps
import os
import pickle
import random
from io import BytesIO
def randomJPEGcompression(image, min_quality=75):
qf = random.randrange(min_quality, 100)
outputIoStream = BytesIO()
image = Image.fromarray(image)
    image.save(outputIoStream, "JPEG", quality=qf, optimize=True)
outputIoStream.seek(0)
return np.array(Image.open(outputIoStream))
def random_alter_background(img_np, mask_np, white_prob=0.3):
if random.random()<white_prob:
        # gray or white
if random.random()<0.5:
bg_value = np.random.randint(220, 250, size=(1,1,1), dtype="uint8")
else:
bg_value = np.random.randint(250, 256, size=(1,1,1), dtype="uint8")
else:
# random color
bg_value = np.random.randint(0,255,size=(1,1,3), dtype="uint8")
# replace the background
bg_mask = mask_np[:,:,None]==0
bg = bg_value*bg_mask
img_new_np = img_np*(~bg_mask)+bg
return img_new_np
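# Hedged example (not in the original file): chaining the two augmentations above on a
# dummy image/mask pair; shapes and values are made up purely for illustration.
#
# import numpy as np
# img = np.random.randint(0, 256, size=(64, 64, 3), dtype='uint8')
# mask = np.zeros((64, 64), dtype='uint8')
# mask[16:48, 16:48] = 1                       # pretend the centre is the garment
# img = random_alter_background(img, mask)     # repaint the background pixels
# img = randomJPEGcompression(img)             # then degrade the JPEG quality a little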
class COCOiMaterialist(COCOInstance):
CLASSES=['shirt, blouse', 'top, t-shirt, sweatshirt', 'sweater', 'cardigan', 'jacket',
'vest', 'pants', 'shorts', 'skirt', 'coat', 'dress', 'jumpsuit', 'cape',
'glasses', 'hat', 'headband, head covering, hair accessory', 'tie', 'glove',
'watch', 'belt', 'leg warmer', 'tights, stockings', 'sock', 'shoe',
'bag, wallet', 'scarf', 'umbrella', 'hood', 'collar', 'lapel', 'epaulette',
'sleeve', 'pocket', 'neckline', 'buckle', 'zipper', 'applique', 'bead',
'bow', 'flower', 'fringe', 'ribbon', 'rivet', 'ruffle', 'sequin', 'tassel']
def _load_jsons(self):
"""Load all image paths and labels from JSON annotation files into buffer."""
items = []
labels = []
segms = []
for split in self._splits:
anno = os.path.join(self._root, 'annotations', split) + '.json'
_coco = COCO(anno)
self._coco.append(_coco)
classes = [c['name'] for c in _coco.loadCats(_coco.getCatIds())]
            if classes != self.classes:
                raise ValueError("Incompatible category names with COCO: {}".format(classes))
json_id_to_contiguous = {
v: k for k, v in enumerate(_coco.getCatIds())}
if self.json_id_to_contiguous is None:
self.json_id_to_contiguous = json_id_to_contiguous
self.contiguous_id_to_json = {
v: k for k, v in self.json_id_to_contiguous.items()}
else:
assert self.json_id_to_contiguous == json_id_to_contiguous
# iterate through the annotations
image_ids = sorted(_coco.getImgIds())
for entry in _coco.loadImgs(image_ids):
filename = entry['file_name']
dirname = split.split('_')[-1] # "train" or "val"
abs_path = os.path.join(self._root, dirname, filename)
if not os.path.exists(abs_path):
raise IOError('Image: {} not exists.'.format(abs_path))
label, segm = self._check_load_bbox(_coco, entry)
# skip images without objects
if self._skip_empty and label is None:
continue
items.append(abs_path)
labels.append(label)
segms.append(segm)
return items, labels, segms
def _check_load_bbox(self, coco, entry):
"""Check and load ground-truth labels"""
ann_ids = coco.getAnnIds(imgIds=entry['id'], iscrowd=None)
objs = coco.loadAnns(ann_ids)
# check valid bboxes
valid_objs = []
valid_segs = []
width = entry['width']
height = entry['height']
for obj in objs:
if obj.get('ignore', 0) == 1:
continue
# crowd objs cannot be used for segmentation
if obj.get('iscrowd', 0) == 1:
continue
# need accurate floating point box representation
x1, y1, w, h = obj['bbox']
x2, y2 = x1 + np.maximum(0, w), y1 + np.maximum(0, h)
# clip to image boundary
x1 = np.minimum(width, np.maximum(0, x1))
y1 = np.minimum(height, np.maximum(0, y1))
x2 = np.minimum(width, np.maximum(0, x2))
y2 = np.minimum(height, np.maximum(0, y2))
# require non-zero seg area and more than 1x1 box size
if obj['area'] > self._min_object_area and x2 > x1 and y2 > y1 \
and (x2 - x1) * (y2 - y1) >= 4:
contiguous_cid = self.json_id_to_contiguous[obj['category_id']]
valid_objs.append([x1, y1, x2, y2, contiguous_cid])
segs = obj['segmentation'] # polygon or RLE
assert isinstance(segs, list) or isinstance(segs, dict), '{}'.format(obj.get('iscrowd', 0))
if isinstance(segs, list):
valid_segs.append([np.asarray(p).reshape(-1, 2).astype('float32')
for p in segs if len(p) >= 6])
else:
valid_segs.append(segs)
# there is no easy way to return a polygon placeholder: None is returned
# in validation, None cannot be used for batchify -> drop label in transform
        # in training: empty images should be skipped
if not valid_objs:
valid_objs = None
valid_segs = None
else:
valid_objs = np.asarray(valid_objs).astype('float32')
return valid_objs, valid_segs
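    # Worked example (added for clarity, not from the original code): for an image with
    # width=100, height=80 and a COCO bbox [x, y, w, h] = [90, 70, 30, 40], the code above
    # computes x2 = 90 + 30 = 120 and y2 = 70 + 40 = 110, then clips both to the image
    # boundary, giving (x1, y1, x2, y2) = (90, 70, 100, 80) before the area/size checks.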
class iMaterialistSegmentation(COCOSegmentation):
"""only using categories less than 13 for segmentation"""
CAT_LIST = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
NUM_CLASS = 14
def __init__(self, root=os.path.expanduser('datasets/imaterialist'),
split='train', mode=None, transform=None, tta=None, alter_bg=False, **kwargs):
super(COCOSegmentation, self).__init__(root, split, mode, transform, **kwargs)
from pycocotools import mask
if split == 'train':
print('train set')
ann_file = os.path.join(root, 'annotations/rle_instances_train.json')
ids_file = os.path.join(root, 'annotations/train_ids.mx')
self.root = os.path.join(root, 'train')
else:
print('val set')
ann_file = os.path.join(root, 'annotations/rle_instances_val.json')
ids_file = os.path.join(root, 'annotations/val_ids.mx')
self.root = os.path.join(root, 'val')
self.coco = COCO(ann_file)
self.coco_mask = mask
if os.path.exists(ids_file):
with open(ids_file, 'rb') as f:
self.ids = pickle.load(f)
else:
ids = list(self.coco.imgs.keys())
self.ids = self._preprocess(ids, ids_file)
self.transform = transform
self.alter_bg = alter_bg
if self.alter_bg:
self.NUM_CLASS = 2
if self.mode != "train":
self.tta = tta
def _gen_seg_mask(self, target, h, w):
mask = np.zeros((h, w), dtype=np.uint8)
coco_mask = self.coco_mask
for instance in target:
m = coco_mask.decode(instance['segmentation'])
cat = instance['category_id']
if cat in self.CAT_LIST:
c = self.CAT_LIST.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask
@property
def classes(self):
"""Category names."""
if self.alter_bg:
return ('background', 'garment')
else:
return ('background', 'shirt, blouse', 'top, t-shirt, sweatshirt', 'sweater',
'cardigan', 'jacket', 'vest', 'pants', 'shorts', 'skirt', 'coat',
'dress', 'jumpsuit', 'cape')
def _sync_pad(self, img, mask):
w, h = img.size
long_size = max(w, h)
padh = long_size - h
padw = long_size - w
im_pad = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask_pad = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# region for padding (set -1 later)
ignore_w = round((1-padw/float(long_size))*self.crop_size) if padw != 0 else None
ignore_h = round((1-padh/float(long_size))*self.crop_size) if padh != 0 else None
return im_pad, mask_pad, (ignore_w, ignore_h)
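    # Worked example (illustrative, not from the original source): for a 200x100 image
    # (w=200, h=100) the long side is 200, so padw=0 and padh=100. With crop_size=512 the
    # padded square is later resized to 512x512 and ignore_h = round((1 - 100/200) * 512)
    # = 256 marks where the padded bottom region begins, so it can later be set to -1.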
def _resize_short_within(self, img, short, max_size, mult_base=1, interp=Image.BILINEAR):
"""Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly. Also this function will ensure
the new image will not exceed ``max_size`` even at the longer side.
Parameters
----------
img : PIL.Image
The original image.
short : int
Resize shorter side to ``short``.
max_size : int
Make sure the longer side of new image is smaller than ``max_size``.
mult_base : int, default is 1
Width and height are rounded to multiples of `mult_base`.
interp : default is Image.BILINEAR
Returns
-------
PIL.Image
An 'PIL.Image' containing the resized image.
"""
w, h = img.size
im_size_min, im_size_max = (h, w) if w > h else (w, h)
scale = float(short) / float(im_size_min)
if np.round(scale * im_size_max / mult_base) * mult_base > max_size:
# fit in max_size
scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max)
new_w, new_h = (int(np.round(w * scale / mult_base) * mult_base),
int(np.round(h * scale / mult_base) * mult_base))
img = img.resize((new_w, new_h), interp)
return img
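    # Worked example (added for illustration): resizing a 400x900 image with short=600 and
    # max_size=1000 gives an initial scale of 600/400 = 1.5, but 1.5 * 900 = 1350 exceeds
    # max_size, so the scale is recomputed as 1000/900 (about 1.111) and the image is
    # resized to roughly 444x1000, keeping the aspect ratio while respecting max_size.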
def _testval_sync_transform(self, img, mask, padding=True):
""" resize image and mask while keeping ratio"""
if padding:
# padding and resize
img, mask, keep_size = self._sync_pad(img, mask)
img = img.resize((self.crop_size, self.crop_size), Image.BILINEAR)
mask = mask.resize(img.size, Image.NEAREST)
else:
# resize without padding
short_size = self.crop_size*1.75
if max(img.size) > short_size:
img = self._resize_short_within(img, short_size, short_size*2)
mask = mask.resize(img.size, Image.NEAREST)
# final transform
img, mask = self._img_transform(img), self._mask_transform(mask)
if padding:
mask[keep_size[1]:, keep_size[0]:] = -1
return img, mask
def _random_alter_background(self, img, mask):
# alter background and random jpeg quality
img_np = img.asnumpy().astype('uint8')
mask_np = mask.asnumpy()
img_new_np = random_alter_background(img_np, mask_np)
img_new_np = randomJPEGcompression(img_new_np)
img_new = self._img_transform(img_new_np)
return img_new
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
img_metadata = coco.loadImgs(img_id)[0]
path = img_metadata['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
mask = self._gen_seg_mask(
cocotarget, img_metadata['height'], img_metadata['width'])
if self.alter_bg:
mask = (mask>0).astype('uint8')
mask = Image.fromarray(mask)
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
if self.alter_bg and (random.random() < self.alter_bg):
img = self._random_alter_background(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
# resize without padding for memory reduction when test time augmentation
img, mask = self._testval_sync_transform(img, mask, not self.tta)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask |
py | 1a4bef495fda2919329e1c2b211d711f747f349c | '''
This is the entry point which gets invoked on run and starts a web server.
It consists of route definitions for HTTP requests.
The bottle.py package (from PyPI) provides the WSGI interface.
'''
from bottle import route, run, error
from app import App
app = App()
@route('/')
def index():
return app.index()
@route('/company/<company_name>')
def company_users(company_name):
return app.company_users(company_name)
@route('/user/<user_name>')
def user(user_name):
return app.user(user_name)
@route('/user/<user_name_1>/<user_name_2>')
def two_users(user_name_1, user_name_2):
return app.two_users(user_name_1, user_name_2)
@error(404)
def error_404(error):
return app.error_404(error)
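# Example requests these routes would serve (host and port are hypothetical):
#   GET http://localhost:8080/                -> app.index()
#   GET http://localhost:8080/company/acme    -> app.company_users('acme')
#   GET http://localhost:8080/user/alice      -> app.user('alice')
#   GET http://localhost:8080/user/alice/bob  -> app.two_users('alice', 'bob')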
if __name__ == "__main__":
run(host='0.0.0.0', port=8080, debug=True) |
py | 1a4bf039934b6770996467144b54d18f697e2818 | # -*- coding: utf-8 -*-
import pprint
import bag
#from laygo import *
import laygo
import numpy as np
import yaml
import matplotlib.pyplot as plt
lib_name = 'adc_sar_templates'
cell_name = 'sarclkgen_static'
impl_lib = 'adc_sar_generated'
#tb_lib = 'adc_sar_testbenches'
#tb_cell = 'capdac_8b_tb_tran'
load_from_file=True
yamlfile_spec="adc_sar_spec.yaml"
yamlfile_size="adc_sar_size.yaml"
'''
if load_from_file==True:
with open(yamlfile_spec, 'r') as stream:
specdict = yaml.load(stream)
with open(yamlfile_size, 'r') as stream:
sizedict = yaml.load(stream)
cell_name='sarabe_dualdelay_'+str(specdict['n_bit'])+'b'
'''
verify_lvs = True
print('creating BAG project')
prj = bag.BagProject()
#lvs
if verify_lvs==True:
# run lvs
print('running lvs')
lvs_passed, lvs_log = prj.run_lvs(impl_lib, cell_name)
if not lvs_passed:
raise Exception('oops lvs died. See LVS log file %s' % lvs_log)
print('lvs passed')
|
py | 1a4bf27eb3ceb5d0e1376f0f563e26e11865244b | """Important Bodies.
Contains some predefined bodies of the Solar System:
* Sun (☉)
* Earth (♁)
* Moon (☾)
* Mercury (☿)
* Venus (♀)
* Mars (♂)
* Jupiter (♃)
* Saturn (♄)
* Uranus (⛢)
* Neptune (♆)
* Pluto (♇)
and a way to define new bodies (:py:class:`~Body` class).
Data references can be found in :py:mod:`~einsteinpy.constant`
"""
import astropy.units as u
from einsteinpy import constant
from einsteinpy.coordinates import CartesianDifferential
class Body:
"""
Class to create a generic Body
"""
@u.quantity_input(mass=u.kg, R=u.km)
def __init__(
self,
name="Generic Body",
mass=0 * u.kg,
R=0 * u.km,
differential=None,
a=0 * u.m,
q=0 * u.C,
parent=None,
):
"""
Parameters
----------
name : str
Name/ID of the body
mass : ~astropy.units.kg
Mass of the body
        R : ~astropy.units.km
            Radius of the body
        differential : ~einsteinpy.coordinates, optional
            Complete coordinates of the body
        a : ~astropy.units.m, optional
            Spin factor of massive body. Should be less than half of the Schwarzschild radius.
        q : ~astropy.units.C, optional
            Charge on the massive body
        is_attractor : bool, optional
            Whether this body acts as an attractor
parent : Body, optional
The parent object of the body.
"""
if differential:
if differential.system == "Cartesian":
self.pos_vec = [differential.x, differential.y, differential.z]
self.vel_vec = [differential.v_x, differential.v_y, differential.v_z]
else:
self.pos_vec = [differential.r, differential.theta, differential.phi]
self.vel_vec = [differential.v_r, differential.v_t, differential.v_p]
self.a = a
self.R = R
self.q = q
self.mass = mass
self.name = name
self.coordinates = differential
self.parent = parent
def __repr__(self):
return (
"'Body ( name: ({0}), mass: ({1}), radius: ({2}), coordinates: ({3}), spin factor: ({4}), charge: ({"
"5}) )'".format(
self.name, self.mass, self.R, self.coordinates, self.a, self.q
)
)
def __str__(self):
return (
"Body ( name: ({0}), mass: ({1}), radius: ({2}), coordinates: ({3}), spin factor: ({4}), charge: ({"
"5}) )".format(
self.name, self.mass, self.R, self.coordinates, self.a, self.q
)
)
class _Sun(Body):
def __init__(self):
parent = None
name = "Sun"
R = constant.R_sun
mass = constant.Solar_Mass
super(_Sun, self).__init__(name=name, mass=mass, R=R, parent=parent)
Sun = _Sun()
class _Earth(Body):
def __init__(self):
parent = Sun
name = "Earth"
        R = 6371 * u.km
mass = 5.97219e24 * u.kg
super(_Earth, self).__init__(name=name, mass=mass, R=R, parent=parent)
Earth = _Earth()
class _Moon(Body):
def __init__(self):
parent = Earth
name = "Moon"
R = 1737.5 * u.km
mass = 7.34767309e22 * u.kg
super(_Moon, self).__init__(name=name, mass=mass, R=R, parent=parent)
Moon = _Moon()
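# Illustrative sketch (not part of this module): defining a new body, as the module
# docstring suggests. The numbers are placeholders, not real physical data, and the
# CartesianDifferential call signature is assumed from the import at the top of the file.
#
# coords = CartesianDifferential(
#     0. * u.m, 4e5 * u.m, 0. * u.m,
#     100. * u.m / u.s, 0. * u.m / u.s, 0. * u.m / u.s,
# )
# satellite = Body(name="Satellite", mass=500 * u.kg, R=0.01 * u.km,
#                  differential=coords, parent=Earth)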
|
py | 1a4bf319ddb2e554f00f0426e9425c0bf61ba123 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, VMRaid Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
class TestPrintSettings(unittest.TestCase):
pass
|
py | 1a4bf416c98c8ed612f275ccd4aad4885c802e9a | import re
import typing
import pytest
from dagster import (
Any,
DagsterInvalidConfigDefinitionError,
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
Field,
Float,
Int,
List,
ModeDefinition,
Noneable,
Permissive,
PipelineDefinition,
ResourceDefinition,
Set,
String,
Tuple,
composite_solid,
execute_pipeline,
execute_solid,
pipeline,
solid,
)
from dagster.config.errors import DagsterEvaluationErrorReason
from dagster.config.field_utils import convert_potential_field
from dagster.config.validate import process_config, validate_config
def test_noop_config():
assert Field(Any)
def test_int_field():
config_field = convert_potential_field({'int_field': Int})
assert validate_config(config_field.config_type, {'int_field': 1}).value == {'int_field': 1}
def test_float_field():
config_field = convert_potential_field({'float_field': Float})
assert validate_config(config_field.config_type, {'float_field': 1.0}).value == {
'float_field': 1.0
}
assert process_config(config_field.config_type, {'float_field': 1.0}).value == {
'float_field': 1.0
}
assert validate_config(config_field.config_type, {'float_field': 1}).value == {'float_field': 1}
assert process_config(config_field.config_type, {'float_field': 1}).value == {
'float_field': 1.0
}
def assert_config_value_success(config_type, config_value, expected):
result = process_config(config_type, config_value)
assert result.success
assert result.value == expected
def assert_eval_failure(config_type, value):
assert not validate_config(config_type, value).success
def test_int_fails():
config_field = convert_potential_field({'int_field': Int})
assert_eval_failure(config_field.config_type, {'int_field': 'fjkdj'})
assert_eval_failure(config_field.config_type, {'int_field': True})
def test_default_arg():
config_field = convert_potential_field(
{'int_field': Field(Int, default_value=2, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {'int_field': 2})
def test_default_float_arg():
config_field = convert_potential_field(
{'float_field': Field(Float, default_value=2.0, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {'float_field': 2.0})
config_field = convert_potential_field(
{'float_field': Field(Float, default_value=2, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {'float_field': 2})
def _single_required_string_config_dict():
return convert_potential_field({'string_field': String})
def _multiple_required_fields_config_dict():
return convert_potential_field({'field_one': String, 'field_two': String})
def _single_optional_string_config_dict():
return convert_potential_field({'optional_field': Field(String, is_required=False)})
def _single_optional_string_field_config_dict_with_default():
optional_field_def = Field(String, is_required=False, default_value='some_default')
return convert_potential_field({'optional_field': optional_field_def})
def _mixed_required_optional_string_config_dict_with_default():
return convert_potential_field(
{
'optional_arg': Field(String, is_required=False, default_value='some_default'),
'required_arg': Field(String, is_required=True),
'optional_arg_no_default': Field(String, is_required=False),
}
)
def _multiple_required_fields_config_permissive_dict():
return Field(Permissive({'field_one': Field(String), 'field_two': Field(String)}))
def _validate(config_field, value):
res = process_config(config_field.config_type, value)
assert res.success, res.errors[0].message
return res.value
def test_single_required_string_field_config_type():
assert _validate(_single_required_string_config_dict(), {'string_field': 'value'}) == {
'string_field': 'value'
}
with pytest.raises(
AssertionError,
match=(
re.escape(
'Missing required field "string_field" at the root. Available Fields: '
'"[\'string_field\']".'
)
),
):
_validate(_single_required_string_config_dict(), {})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {'extra': 'yup'})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {'string_field': 'yupup', 'extra': 'yup'})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {'string_field': 1})
def test_undefined_field_error():
with pytest.raises(
AssertionError,
match=('Undefined field "extra" at the root. Expected: "{ string_field: ' 'String }".'),
):
_validate(
_single_required_string_config_dict(), {'string_field': 'value', 'extra': 'extra'}
)
def test_multiple_required_fields_passing():
assert _validate(
_multiple_required_fields_config_dict(),
{'field_one': 'value_one', 'field_two': 'value_two'},
) == {'field_one': 'value_one', 'field_two': 'value_two'}
def test_multiple_required_fields_failing():
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {'field_one': 'yup'})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {'field_one': 'yup', 'extra': 'yup'})
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_dict(),
{'field_one': 'yup', 'field_two': 'yup', 'extra': 'should_not_exist'},
)
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_dict(), {'field_one': 'value_one', 'field_two': 2}
)
def test_single_optional_field_passing():
assert _validate(_single_optional_string_config_dict(), {'optional_field': 'value'}) == {
'optional_field': 'value'
}
assert _validate(_single_optional_string_config_dict(), {}) == {}
with pytest.raises(AssertionError):
assert _validate(_single_optional_string_config_dict(), {'optional_field': None}) == {
'optional_field': None
}
def test_single_optional_field_failing():
with pytest.raises(AssertionError):
_validate(_single_optional_string_config_dict(), {'optional_field': 1})
with pytest.raises(AssertionError):
_validate(_single_optional_string_config_dict(), {'dlkjfalksdjflksaj': 1})
def test_single_optional_field_passing_with_default():
assert _validate(_single_optional_string_field_config_dict_with_default(), {}) == {
'optional_field': 'some_default'
}
assert _validate(
_single_optional_string_field_config_dict_with_default(), {'optional_field': 'override'}
) == {'optional_field': 'override'}
def test_permissive_multiple_required_fields_passing():
assert _validate(
_multiple_required_fields_config_permissive_dict(),
{
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': 'should_exist',
},
) == {
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': 'should_exist',
}
def test_permissive_multiple_required_fields_nested_passing():
assert _validate(
_multiple_required_fields_config_permissive_dict(),
{
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': {'nested': 'value', 'with_int': 2},
},
) == {
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': {'nested': 'value', 'with_int': 2},
}
def test_permissive_multiple_required_fields_failing():
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_permissive_dict(), {})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_permissive_dict(), {'field_one': 'yup'})
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_permissive_dict(),
{'field_one': 'value_one', 'field_two': 2},
)
def test_mixed_args_passing():
assert _validate(
_mixed_required_optional_string_config_dict_with_default(),
{'optional_arg': 'value_one', 'required_arg': 'value_two'},
) == {'optional_arg': 'value_one', 'required_arg': 'value_two'}
assert _validate(
_mixed_required_optional_string_config_dict_with_default(), {'required_arg': 'value_two'}
) == {'optional_arg': 'some_default', 'required_arg': 'value_two'}
assert _validate(
_mixed_required_optional_string_config_dict_with_default(),
{'required_arg': 'value_two', 'optional_arg_no_default': 'value_three'},
) == {
'optional_arg': 'some_default',
'required_arg': 'value_two',
'optional_arg_no_default': 'value_three',
}
def _single_nested_config():
return convert_potential_field({'nested': {'int_field': Int}})
def _nested_optional_config_with_default():
return convert_potential_field(
{'nested': {'int_field': Field(Int, is_required=False, default_value=3)}}
)
def _nested_optional_config_with_no_default():
return convert_potential_field({'nested': {'int_field': Field(Int, is_required=False)}})
def test_single_nested_config():
assert _validate(_single_nested_config(), {'nested': {'int_field': 2}}) == {
'nested': {'int_field': 2}
}
def test_single_nested_config_undefined_errors():
with pytest.raises(
AssertionError,
match='Value at path root:nested must be dict. Expected: "{ int_field: Int }".',
):
_validate(_single_nested_config(), {'nested': 'dkjfdk'})
with pytest.raises(
AssertionError,
match='Invalid scalar at path root:nested:int_field. Value "dkjfdk" of type .* is not valid for expected type "Int".',
):
_validate(_single_nested_config(), {'nested': {'int_field': 'dkjfdk'}})
with pytest.raises(
AssertionError,
match=(
'Undefined field "not_a_field" at path root:nested. Expected: ' '"{ int_field: Int }".'
),
):
_validate(_single_nested_config(), {'nested': {'int_field': 2, 'not_a_field': 1}})
with pytest.raises(
AssertionError,
match='Invalid scalar at path root:nested:int_field. Value "{\'too_nested\': \'dkjfdk\'}" of type .* is not valid for expected type "Int".',
):
_validate(_single_nested_config(), {'nested': {'int_field': {'too_nested': 'dkjfdk'}}})
def test_nested_optional_with_default():
assert _validate(_nested_optional_config_with_default(), {'nested': {'int_field': 2}}) == {
'nested': {'int_field': 2}
}
assert _validate(_nested_optional_config_with_default(), {'nested': {}}) == {
'nested': {'int_field': 3}
}
def test_nested_optional_with_no_default():
assert _validate(_nested_optional_config_with_no_default(), {'nested': {'int_field': 2}}) == {
'nested': {'int_field': 2}
}
assert _validate(_nested_optional_config_with_no_default(), {'nested': {}}) == {'nested': {}}
def test_config_defaults():
@solid(config_schema={"sum": Int})
def two(_context):
assert _context.solid_config['sum'] == 6
return _context.solid_config['sum']
@solid(config_schema={"sum": Int})
def one(_context, prev_sum):
assert prev_sum == 6
return prev_sum + _context.solid_config['sum']
# addition_composite_solid
def addition_composite_solid_config_fn(config):
child_config = {'config': {"sum": config['a'] + config['b'] + config['c']}}
return {'one': child_config, 'two': child_config}
@composite_solid(
config_fn=addition_composite_solid_config_fn,
config_schema={
"a": Field(Int, is_required=False, default_value=1),
"b": Field(Int, is_required=False, default_value=2),
"c": Int,
},
)
def addition_composite_solid():
return one(two())
@pipeline
def addition_pipeline():
addition_composite_solid()
result = execute_pipeline(
addition_pipeline, {'solids': {'addition_composite_solid': {'config': {'c': 3}}}}
)
assert result.success
def test_config_with_and_without_config():
@solid(config_schema={'prefix': Field(str, is_required=False, default_value='_')})
def prefix_value(context, v):
return '{prefix}{v}'.format(prefix=context.solid_config["prefix"], v=v)
@composite_solid(
config_fn=lambda cfg: {'prefix_value': {'config': {'prefix': cfg['prefix']}}},
config_schema={'prefix': Field(str, is_required=False, default_value='_id_')},
)
def prefix_id(val):
return prefix_value(val)
@solid
def print_value(_, v):
return str(v)
@pipeline
def config_issue_pipeline():
v = prefix_id()
print_value(v)
result = execute_pipeline(
config_issue_pipeline,
{
'solids': {
'prefix_id': {
'config': {'prefix': '_customprefix_'},
'inputs': {'val': {'value': "12345"}},
}
}
},
)
assert result.success
assert result.result_for_solid('print_value').output_value() == '_customprefix_12345'
result_using_default = execute_pipeline(
config_issue_pipeline,
{'solids': {'prefix_id': {'config': {}, 'inputs': {'val': {'value': "12345"}}}}},
)
assert result_using_default.success
assert result_using_default.result_for_solid('print_value').output_value() == '_id_12345'
def test_build_optionality():
optional_test_type = convert_potential_field(
{'required': {'value': String}, 'optional': {'value': Field(String, is_required=False)},}
).config_type
assert optional_test_type.fields['required'].is_required
assert optional_test_type.fields['optional'].is_required is False
def test_wrong_solid_name():
@solid(name='some_solid', input_defs=[], output_defs=[], config_schema=Int)
def some_solid(_):
return None
@pipeline(name='pipeline_wrong_solid_name')
def pipeline_def():
some_solid()
env_config = {'solids': {'another_name': {'config': {}}}}
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, env_config)
pe = pe_info.value
assert 'Undefined field "another_name" at path root:solids' in str(pe)
def fail_me():
assert False
def dummy_resource(config_schema=None):
return ResourceDefinition(lambda: None, config_schema=config_schema)
def test_wrong_resources():
pipeline_def = PipelineDefinition(
name='pipeline_test_multiple_context',
mode_defs=[
ModeDefinition(
resource_defs={'resource_one': dummy_resource(), 'resource_two': dummy_resource()}
)
],
solid_defs=[],
)
with pytest.raises(
DagsterInvalidConfigError, match='Undefined field "nope" at path root:resources'
):
execute_pipeline(pipeline_def, {'resources': {'nope': {}}})
def test_solid_list_config():
value = [1, 2]
called = {}
@solid(name='solid_list_config', input_defs=[], output_defs=[], config_schema=[int])
def solid_list_config(context):
assert context.solid_config == value
called['yup'] = True
@pipeline(name='solid_list_config_pipeline')
def pipeline_def():
solid_list_config()
result = execute_pipeline(
pipeline_def, run_config={'solids': {'solid_list_config': {'config': value}}}
)
assert result.success
assert called['yup']
def test_two_list_types():
@solid(
input_defs=[], config_schema={'list_one': [int], 'list_two': [int]},
)
def two_list_type(context):
return context.solid_config
assert execute_solid(
two_list_type,
run_config={'solids': {'two_list_type': {'config': {'list_one': [1], 'list_two': [2]}}}},
).output_value() == {'list_one': [1], 'list_two': [2]}
@solid(
input_defs=[], config_schema={'list_one': [Int], 'list_two': [Int]},
)
def two_list_type_condensed_syntax(context):
return context.solid_config
assert execute_solid(
two_list_type_condensed_syntax,
run_config={
'solids': {
'two_list_type_condensed_syntax': {'config': {'list_one': [1], 'list_two': [2]}}
}
},
).output_value() == {'list_one': [1], 'list_two': [2]}
@solid(
input_defs=[], config_schema={'list_one': [int], 'list_two': [int]},
)
def two_list_type_condensed_syntax_primitives(context):
return context.solid_config
assert execute_solid(
two_list_type_condensed_syntax_primitives,
run_config={
'solids': {
'two_list_type_condensed_syntax_primitives': {
'config': {'list_one': [1], 'list_two': [2]}
}
}
},
).output_value() == {'list_one': [1], 'list_two': [2]}
def test_multilevel_default_handling():
@solid(config_schema=Field(Int, is_required=False, default_value=234))
def has_default_value(context):
assert context.solid_config == 234
pipeline_def = PipelineDefinition(
name='multilevel_default_handling', solid_defs=[has_default_value]
)
assert execute_pipeline(pipeline_def).success
assert execute_pipeline(pipeline_def, run_config=None).success
assert execute_pipeline(pipeline_def, run_config={}).success
assert execute_pipeline(pipeline_def, run_config={'solids': {}}).success
assert execute_pipeline(pipeline_def, run_config={'solids': {'has_default_value': {}}}).success
assert execute_pipeline(
pipeline_def, run_config={'solids': {'has_default_value': {'config': 234}}}
).success
def test_no_env_missing_required_error_handling():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
pipeline_def = PipelineDefinition(
name='no_env_missing_required_error', solid_defs=[required_int_solid]
)
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def)
assert isinstance(pe_info.value, DagsterInvalidConfigError)
pe = pe_info.value
assert len(pe.errors) == 1
mfe = pe.errors[0]
assert mfe.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
assert len(pe.errors) == 1
assert pe.errors[0].message == (
'''Missing required field "solids" at the root. '''
'''Available Fields: "['execution', 'loggers', '''
''''resources', 'solids', 'storage']".'''
)
def test_root_extra_field():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
@pipeline
def pipeline_def():
required_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def,
run_config={'solids': {'required_int_solid': {'config': 948594}}, 'nope': None},
)
pe = pe_info.value
assert len(pe.errors) == 1
fnd = pe.errors[0]
assert fnd.reason == DagsterEvaluationErrorReason.FIELD_NOT_DEFINED
assert 'Undefined field "nope"' in pe.message
def test_deeper_path():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
@pipeline
def pipeline_def():
required_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def, run_config={'solids': {'required_int_solid': {'config': 'asdf'}}}
)
pe = pe_info.value
assert len(pe.errors) == 1
rtm = pe.errors[0]
assert rtm.reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
def test_working_list_path():
called = {}
@solid(config_schema=[int])
def required_list_int_solid(context):
assert context.solid_config == [1, 2]
called['yup'] = True
@pipeline
def pipeline_def():
required_list_int_solid()
result = execute_pipeline(
pipeline_def, run_config={'solids': {'required_list_int_solid': {'config': [1, 2]}}}
)
assert result.success
assert called['yup']
def test_item_error_list_path():
called = {}
@solid(config_schema=[int])
def required_list_int_solid(context):
assert context.solid_config == [1, 2]
called['yup'] = True
@pipeline
def pipeline_def():
required_list_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def,
run_config={'solids': {'required_list_int_solid': {'config': [1, 'nope']}}},
)
pe = pe_info.value
assert len(pe.errors) == 1
rtm = pe.errors[0]
assert rtm.reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
assert 'Invalid scalar at path root:solids:required_list_int_solid:config[1]' in str(pe)
def test_list_in_config_error():
error_msg = (
'Cannot use List in the context of config. '
'Please use a python list (e.g. [int]) or dagster.Array (e.g. Array(int)) instead.'
)
with pytest.raises(DagsterInvalidDefinitionError, match=re.escape(error_msg)):
@solid(config_schema=List[int])
def _no_runtime_list_in_config(_):
pass
def test_required_resource_not_given():
@pipeline(
name='required_resource_not_given',
mode_defs=[ModeDefinition(resource_defs={'required': dummy_resource(Int)})],
)
def pipeline_def():
pass
with pytest.raises(DagsterInvalidConfigError) as not_none_pe_info:
execute_pipeline(pipeline_def, run_config={'resources': None})
assert len(not_none_pe_info.value.errors) == 1
assert (
'Value at path root:resources must be not be None.'
in not_none_pe_info.value.errors[0].message
)
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, run_config={'resources': {}})
pe = pe_info.value
error = pe.errors[0]
assert error.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
assert (
error.message == 'Missing required field "required" at path root:resources. '
'Available Fields: "[\'required\']".'
)
def test_multilevel_good_error_handling_solids():
@solid(config_schema=Int)
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
with pytest.raises(DagsterInvalidConfigError) as not_none_pe_info:
execute_pipeline(pipeline_def, run_config={'solids': None})
assert len(not_none_pe_info.value.errors) == 1
assert (
'Value at path root:solids must be not be None.' in not_none_pe_info.value.errors[0].message
)
with pytest.raises(DagsterInvalidConfigError) as missing_field_pe_info:
execute_pipeline(pipeline_def, run_config={'solids': {}})
assert len(missing_field_pe_info.value.errors) == 1
assert missing_field_pe_info.value.errors[0].message == (
'''Missing required field "good_error_handling" at path root:solids. '''
'''Available Fields: "['good_error_handling']".'''
)
def test_multilevel_good_error_handling_solid_name_solids():
@solid(config_schema=Int)
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, run_config={'solids': {'good_error_handling': {}}})
assert len(pe_info.value.errors) == 1
assert pe_info.value.errors[0].message == (
'''Missing required field "config" at path root:solids:good_error_handling. '''
'''Available Fields: "['config', 'outputs']".'''
)
def test_multilevel_good_error_handling_config_solids_name_solids():
@solid(config_schema=Noneable(int))
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
execute_pipeline(pipeline_def, run_config={'solids': {'good_error_handling': {'config': None}}})
def test_invalid_default_values():
with pytest.raises(
DagsterInvalidConfigError,
match='Value "3" of type .* is not valid for expected type "Int"',
):
@solid(config_schema=Field(Int, default_value='3'))
def _solid(_):
pass
def test_typing_types_into_config():
match_str = re.escape(
'You have passed in typing.List to the config system. '
'Types from the typing module in python are not allowed '
'in the config system. You must use types that are imported '
'from dagster or primitive types such as bool, int, etc.'
)
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=Field(typing.List))
def _solid(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=typing.List)
def _solid(_):
pass
match_str = re.escape(
'You have passed in typing.List[int] to the config system. Types '
'from the typing module in python are not allowed in the config system. '
'You must use types that are imported from dagster or primitive types '
'such as bool, int, etc.'
)
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=Field(typing.List[int]))
def _solid(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=typing.List[int])
def _solid(_):
pass
for ttype in [
typing.Optional[int],
typing.Set,
typing.Set[int],
typing.Dict,
typing.Dict[int, str],
typing.Tuple,
typing.Tuple[int, int],
]:
with pytest.raises(DagsterInvalidDefinitionError):
@solid(config_schema=Field(ttype))
def _solid(_):
pass
def test_no_set_in_config_system():
set_error_msg = re.escape('Cannot use Set in the context of a config field.')
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Field(Set))
def _bare_open_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Set)
def _bare_open_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Field(Set[int]))
def _bare_closed_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Set[int])
def _bare_closed_set(_):
pass
def test_no_tuple_in_config_system():
tuple_error_msg = re.escape('Cannot use Tuple in the context of a config field.')
with pytest.raises(DagsterInvalidDefinitionError, match=tuple_error_msg):
@solid(config_schema=Field(Tuple))
def _bare_open_tuple(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=tuple_error_msg):
@solid(config_schema=Field(Tuple[int]))
def _bare_closed_set(_):
pass
def test_field_is_none():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema={'none_field': None})
def _none_is_bad(_):
pass
assert 'Fields cannot be None' in str(exc_info.value)
|
py | 1a4bf4b10f3fe992885f1ea66706c386f8593ab0 | """
No Operation Operator
This does nothing - without error, useful for testing.
It optionally prints the class name - intended to determine when and that
the Operator was run - for testing.
"""
from ..flows.internals.base_operator import BaseOperator
class NoOpOperator(BaseOperator):
def __init__(self, print_message=False):
self.print_message = print_message
super().__init__()
def execute(self, data={}, context={}):
if self.print_message:
print(self.__class__.__name__)
return data, context
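# Minimal usage sketch (assumed, not from the original module): the operator simply echoes
# its inputs, so a flow can drop it in as a placeholder step.
#
# data, context = NoOpOperator(print_message=True).execute(data={"a": 1}, context={})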
|
py | 1a4bf4eabded1cc1d394e2e6b19cdbe4383c70f1 | """
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
def test_example_method():
assert {"hello": "world"} == json.loads('{"hello":"world"}')
|
py | 1a4bf5b60f43ed0889112749e841731ff374f6c5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_profile_udp
short_description: Manage UDP profiles on a BIG-IP
description:
- Manage UDP profiles on a BIG-IP. There are a variety of UDP profiles, each with their
own adjustments to the standard C(udp) profile. Users of this module should be aware
that many of the adjustable knobs have no module default. Instead, the default is
assigned by the BIG-IP system itself which, in most cases, is acceptable.
version_added: 2.6
options:
name:
description:
- Specifies the name of the profile.
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(udp) profile.
idle_timeout:
description:
- Specifies the length of time that a connection is idle (has no traffic) before
the connection is eligible for deletion.
- When creating a new profile, if this parameter is not specified, the remote
device will choose a default value appropriate for the profile, based on its
C(parent) profile.
- When a number is specified, indicates the number of seconds that the UDP
connection can remain idle before the system deletes it.
- When C(0), or C(indefinite), specifies that UDP connections can remain idle
indefinitely.
- When C(immediate), specifies that you do not want the UDP connection to
remain idle, and that it is therefore immediately eligible for deletion.
datagram_load_balancing:
description:
- Specifies, when C(yes), that the system load balances UDP traffic
packet-by-packet.
type: bool
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a UDP profile
  bigip_profile_udp:
name: foo
parent: udp
idle_timeout: 300
datagram_load_balancing: no
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
parent:
description: The new parent of the resource.
returned: changed
type: string
sample: udp
idle_timeout:
description: The new idle timeout of the resource.
returned: changed
type: int
sample: 100
datagram_load_balancing:
description: The new datagram load balancing setting of the resource.
returned: changed
type: bool
sample: True
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'datagramLoadBalancing': 'datagram_load_balancing',
'idleTimeout': 'idle_timeout',
'defaultsFrom': 'parent'
}
api_attributes = [
'datagramLoadBalancing',
'idleTimeout',
'defaultsFrom'
]
returnables = [
'datagram_load_balancing',
'idle_timeout',
'parent'
]
updatables = [
'datagram_load_balancing',
'idle_timeout',
'parent'
]
@property
def idle_timeout(self):
if self._values['idle_timeout'] is None:
return None
if self._values['idle_timeout'] in ['indefinite', 'immediate']:
return self._values['idle_timeout']
return int(self._values['idle_timeout'])
class ApiParameters(Parameters):
@property
def datagram_load_balancing(self):
if self._values['datagram_load_balancing'] is None:
return None
if self._values['datagram_load_balancing'] == 'enabled':
return True
return False
class ModuleParameters(Parameters):
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def datagram_load_balancing(self):
if self._values['datagram_load_balancing'] is None:
return None
if self._values['datagram_load_balancing']:
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def datagram_load_balancing(self):
if self._values['datagram_load_balancing'] is None:
return None
if self._values['datagram_load_balancing'] == 'enabled':
return True
return False
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
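    # Editorial note (not from the original source): compare() falls back to __default,
    # which reports want.<param> whenever it differs from have.<param> or when have lacks
    # the attribute; e.g. want.idle_timeout=300 vs have.idle_timeout=200 yields 300 as the
    # pending change, while equal values fall through and return None (no change recorded).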
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
result = self.client.api.tm.ltm.profile.udps.udp.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
self.client.api.tm.ltm.profile.udps.udp.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def update_on_device(self):
params = self.changes.api_params()
resource = self.client.api.tm.ltm.profile.udps.udp.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(**params)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
resource = self.client.api.tm.ltm.profile.udps.udp.load(
name=self.want.name,
partition=self.want.partition
)
if resource:
resource.delete()
def read_current_from_device(self):
resource = self.client.api.tm.ltm.profile.udps.udp.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ApiParameters(params=result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(),
idle_timeout=dict(),
datagram_load_balancing=dict(type='bool'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
py | 1a4bf6de6695c19d37ed2dba25e36b9d6a9928ee | # Generated by Django 3.1.11 on 2021-06-01 08:02
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
("organizations", "0002_auto_20210512_1437"),
]
operations = [
migrations.AlterField(
model_name="organization",
name="editors_group",
field=models.OneToOneField(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
related_name="editors_of_organization",
to="auth.group",
),
),
migrations.AlterField(
model_name="organization",
name="members_group",
field=models.OneToOneField(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
related_name="members_of_organization",
to="auth.group",
),
),
]
|
py | 1a4bf72a7e1ad07af3a310ee648d732be963d9a2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from osc_lib import exceptions
from neutron_diagnose.command import commandmanager
from neutron_diagnose.i18n import _
class CheckSgRule(commandmanager.ShowOne):
    _description = _("Compare the security group rules in the database "
                     "with the iptables rules on the related compute node.")
def get_parser(self, prog_name):
parser = super(CheckSgRule, self).get_parser(prog_name)
parser.add_argument(
'port-id',
metavar='<port-id>',
help=_('the port uuid.'),
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
network_client = self.app.client_manager.network
ssh_client = self.app.client_manager.ssh
result = {}
return commandmanager.set_result(result)
|
py | 1a4bf7da7ba89535b38a232424c794cde7d5a6ce | from business_rules.variables import BaseVariables, rule_variable
from business_rules.operators import StringType
from . import TestCase
class VariablesClassTests(TestCase):
""" Test methods on classes that inherit from BaseVariables
"""
def test_base_has_no_variables(self):
self.assertEqual(len(BaseVariables.get_all_variables()), 0)
def test_get_all_variables(self):
""" Returns a dictionary listing all the functions on the class that
have been decorated as variables, with some of the data about them.
"""
class SomeVariables(BaseVariables):
@rule_variable(StringType)
def this_is_rule_1(self):
return "blah"
def non_rule(self):
return "baz"
vars = SomeVariables.get_all_variables()
self.assertEqual(len(vars), 1)
self.assertEqual(vars[0]['name'], 'this_is_rule_1')
self.assertEqual(vars[0]['label'], 'This Is Rule 1')
self.assertEqual(vars[0]['field_type'], 'string')
self.assertEqual(vars[0]['options'], [])
# should work on an instance of the class too
self.assertEqual(len(SomeVariables().get_all_variables()), 1)
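# The variables class above is normally paired with an actions class and handed
# to the rules engine. A sketch (assuming the usual business_rules entry point
# run_all(rule_list, defined_variables, defined_actions, stop_on_first_trigger)):
#
#   from business_rules import run_all
#   run_all(rule_list=rules,
#           defined_variables=SomeVariables(),
#           defined_actions=some_actions,
#           stop_on_first_trigger=True)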
|
py | 1a4bf890b83da8c749a9f44c8da4b57adcd154c3 | __author__ = 'ipetrash'
# https://docs.python.org/3.4/tutorial/inputoutput.html#reading-and-writing-files
# http://pythonworld.ru/tipy-dannyx-v-python/fajly-rabota-s-fajlami.html
if __name__ == '__main__':
    # Open the file in write mode
with open('foo.txt', mode='w') as f:
f.write('123\n')
f.write('one two\n')
f.write('one two\n')
f.write('раз два\n') |
py | 1a4bf8c494730640a94c1c8bf10103e99359f677 | """
The command package
"""
__version__ = "4.3.8"
|
py | 1a4bf971feb0c8f97a899b51565f10856647cab4 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the Transactions class of the tac negotiation skill."""
import datetime
import logging
from pathlib import Path
from typing import Deque, Tuple, cast
from unittest.mock import Mock, patch
import pytest
from aea.exceptions import AEAEnforceError
from aea.helpers.transaction.base import Terms
from aea.protocols.dialogue.base import DialogueLabel
from aea.test_tools.test_skill import BaseSkillTestCase, COUNTERPARTY_ADDRESS
from packages.fetchai.skills.tac_negotiation.dialogues import FipaDialogue
from packages.fetchai.skills.tac_negotiation.transactions import Transactions
from tests.conftest import ROOT_DIR
class TestTransactions(BaseSkillTestCase):
"""Test Transactions class of tac negotiation."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
cls.pending_transaction_timeout = 30
cls.transactions = Transactions(
pending_transaction_timeout=cls.pending_transaction_timeout,
name="transactions",
skill_context=cls._skill.skill_context,
)
cls.nonce = "125"
cls.sender = "some_sender_address"
cls.counterparty = "some_counterparty_address"
cls.ledger_id = "some_ledger_id"
cls.terms = Terms(
ledger_id=cls.ledger_id,
sender_address=cls.sender,
counterparty_address=cls.counterparty,
amount_by_currency_id={"1": 10},
quantities_by_good_id={"2": -5},
is_sender_payable_tx_fee=True,
nonce=cls.nonce,
fee_by_currency_id={"1": 1},
)
cls.dialogue_label = DialogueLabel(
("", ""), COUNTERPARTY_ADDRESS, cls._skill.skill_context.agent_address,
)
cls.proposal_id = 5
cls.transaction_id = "some_transaction_id"
def test_simple_properties(self):
"""Test the properties of Transactions class."""
assert self.transactions.pending_proposals == {}
assert self.transactions.pending_initial_acceptances == {}
def test_get_next_nonce(self):
"""Test the get_next_nonce method of the Transactions class."""
assert self.transactions.get_next_nonce() == "1"
assert self.transactions._nonce == 1
def test_update_confirmed_transactions(self):
"""Test the update_confirmed_transactions method of the Transactions class."""
# setup
self.skill.skill_context._get_agent_context().shared_state[
"confirmed_tx_ids"
] = [self.transaction_id]
self.transactions._locked_txs[self.transaction_id] = self.terms
self.transactions._locked_txs_as_buyer[self.transaction_id] = self.terms
self.transactions._locked_txs_as_seller[self.transaction_id] = self.terms
# operation
self.transactions.update_confirmed_transactions()
# after
assert self.transactions._locked_txs == {}
assert self.transactions._locked_txs_as_buyer == {}
assert self.transactions._locked_txs_as_seller == {}
def test_cleanup_pending_transactions_i(self):
"""Test the cleanup_pending_transactions method of the Transactions class where _last_update_for_transactions is NOT empty."""
# setup
datetime_mock = Mock(wraps=datetime.datetime)
datetime_mock.now.return_value = datetime.datetime.strptime(
"01 01 2020 00:03", "%d %m %Y %H:%M"
)
with patch("datetime.datetime", new=datetime_mock):
self.transactions._register_transaction_with_time(self.transaction_id)
self.transactions._locked_txs[self.transaction_id] = self.terms
self.transactions._locked_txs_as_buyer[self.transaction_id] = self.terms
self.transactions._locked_txs_as_seller[self.transaction_id] = self.terms
# operation
with patch.object(self.skill.skill_context.logger, "log") as mock_logger:
self.transactions.cleanup_pending_transactions()
# after
mock_logger.assert_any_call(
logging.DEBUG,
f"removing transaction from pending list: {self.transaction_id}",
)
assert self.transactions._locked_txs == {}
assert self.transactions._locked_txs_as_buyer == {}
assert self.transactions._locked_txs_as_seller == {}
def test_cleanup_pending_transactions_ii(self):
"""Test the cleanup_pending_transactions method of the Transactions class where _last_update_for_transactions is empty."""
# setup
cast(
Deque[Tuple[datetime.datetime, str]],
self.transactions._last_update_for_transactions,
)
assert self.transactions._locked_txs == {}
assert self.transactions._locked_txs_as_buyer == {}
assert self.transactions._locked_txs_as_seller == {}
# operation
self.transactions.cleanup_pending_transactions()
# after
assert self.transactions._locked_txs == {}
assert self.transactions._locked_txs_as_buyer == {}
assert self.transactions._locked_txs_as_seller == {}
def test_add_pending_proposal_i(self):
"""Test the add_pending_proposal method of the Transactions class."""
# before
assert self.dialogue_label not in self.transactions._pending_proposals
# operation
self.transactions.add_pending_proposal(
self.dialogue_label, self.proposal_id, self.terms
)
# after
assert (
self.transactions._pending_proposals[self.dialogue_label][self.proposal_id]
== self.terms
)
def test_add_pending_proposal_ii(self):
"""Test the add_pending_proposal method of the Transactions class where dialogue_label IS in _pending_proposals."""
# setup
self.transactions._pending_proposals[self.dialogue_label] = {1: self.terms}
# operation
with pytest.raises(
AEAEnforceError,
match="Proposal is already in the list of pending proposals.",
):
self.transactions.add_pending_proposal(
self.dialogue_label, self.proposal_id, self.terms
)
def test_add_pending_proposal_iii(self):
"""Test the add_pending_proposal method of the Transactions class where proposal_id IS in _pending_proposals."""
# setup
self.transactions._pending_proposals[self.dialogue_label][
self.proposal_id
] = self.terms
# operation
with pytest.raises(
AEAEnforceError,
match="Proposal is already in the list of pending proposals.",
):
self.transactions.add_pending_proposal(
self.dialogue_label, self.proposal_id, self.terms
)
def test_pop_pending_proposal_i(self):
"""Test the pop_pending_proposal method of the Transactions class."""
# setup
self.transactions.add_pending_proposal(
self.dialogue_label, self.proposal_id, self.terms
)
# operation
actual_terms = self.transactions.pop_pending_proposal(
self.dialogue_label, self.proposal_id
)
# after
assert actual_terms == self.terms
assert (
self.proposal_id
not in self.transactions._pending_proposals[self.dialogue_label]
)
def test_pop_pending_proposal_ii(self):
"""Test the pop_pending_proposal method of the Transactions class where dialogue_label IS in _pending_proposals."""
# setup
self.transactions.add_pending_proposal(
self.dialogue_label, self.proposal_id, self.terms
)
self.transactions._pending_proposals = {}
# operation
with pytest.raises(
AEAEnforceError,
match="Cannot find the proposal in the list of pending proposals.",
):
assert self.transactions.pop_pending_proposal(
self.dialogue_label, self.proposal_id
)
def test_pop_pending_proposal_iii(self):
"""Test the pop_pending_proposal method of the Transactions class where dialogue_label and proposal_id IS in _pending_proposals."""
# setup
self.transactions.add_pending_proposal(
self.dialogue_label, self.proposal_id, self.terms
)
self.transactions._pending_proposals[self.dialogue_label] = {1: self.terms}
# operation
with pytest.raises(
AEAEnforceError,
match="Cannot find the proposal in the list of pending proposals.",
):
assert self.transactions.pop_pending_proposal(
self.dialogue_label, self.proposal_id
)
def test_add_pending_initial_acceptance_i(self):
"""Test the add_pending_initial_acceptance method of the Transactions class."""
# before
assert self.transactions._pending_initial_acceptances == {}
# operation
self.transactions.add_pending_initial_acceptance(
self.dialogue_label, self.proposal_id, self.terms,
)
# after
assert (
self.transactions._pending_initial_acceptances[self.dialogue_label][
self.proposal_id
]
== self.terms
)
def test_add_pending_initial_acceptance_ii(self):
"""Test the add_pending_initial_acceptance method of the Transactions class where dialogue_label IS in _pending_initial_acceptances."""
# setup
self.transactions._pending_initial_acceptances[self.dialogue_label] = {
1: self.terms
}
# operation
with pytest.raises(
AEAEnforceError,
match="Initial acceptance is already in the list of pending initial acceptances.",
):
self.transactions.add_pending_initial_acceptance(
self.dialogue_label, self.proposal_id, self.terms,
)
def test_add_pending_initial_acceptance_iii(self):
"""Test the add_pending_initial_acceptance method of the Transactions class where dialogue_label and proposal_id IS in _pending_initial_acceptances."""
# setup
self.transactions._pending_initial_acceptances[self.dialogue_label] = {
self.proposal_id: self.terms
}
# operation
with pytest.raises(
AEAEnforceError,
match="Initial acceptance is already in the list of pending initial acceptances.",
):
self.transactions.add_pending_initial_acceptance(
self.dialogue_label, self.proposal_id, self.terms,
)
def test_pop_pending_initial_acceptance_i(self):
"""Test the pop_pending_initial_acceptance method of the Transactions class."""
# setup
self.transactions.add_pending_initial_acceptance(
self.dialogue_label, self.proposal_id, self.terms,
)
# operation
actual_terms = self.transactions.pop_pending_initial_acceptance(
self.dialogue_label, self.proposal_id
)
# after
assert actual_terms == self.terms
assert (
self.proposal_id
not in self.transactions._pending_proposals[self.dialogue_label]
)
def test_pop_pending_initial_acceptance_ii(self):
"""Test the pop_pending_initial_acceptance method of the Transactions class where dialogue_label IS in _pending_initial_acceptances."""
# setup
self.transactions.add_pending_initial_acceptance(
self.dialogue_label, self.proposal_id, self.terms,
)
self.transactions._pending_initial_acceptances = {}
# operation
with pytest.raises(
AEAEnforceError,
match="Cannot find the initial acceptance in the list of pending initial acceptances.",
):
assert self.transactions.pop_pending_initial_acceptance(
self.dialogue_label, self.proposal_id
)
def test_pop_pending_initial_acceptance_iii(self):
"""Test the pop_pending_initial_acceptance method of the Transactions class where dialogue_label and proposal_id IS in _pending_initial_acceptances."""
# setup
self.transactions.add_pending_initial_acceptance(
self.dialogue_label, self.proposal_id, self.terms,
)
self.transactions._pending_initial_acceptances[self.dialogue_label] = {
1: self.terms
}
# operation
with pytest.raises(
AEAEnforceError,
match="Cannot find the initial acceptance in the list of pending initial acceptances.",
):
assert self.transactions.pop_pending_initial_acceptance(
self.dialogue_label, self.proposal_id
)
def test_register_transaction_with_time(self):
"""Test the _register_transaction_with_time method of the Transactions class."""
# setup
datetime_mock = Mock(wraps=datetime.datetime)
mocked_now = datetime.datetime.strptime("01 01 2020 00:03", "%d %m %Y %H:%M")
datetime_mock.now.return_value = mocked_now
# operation
with patch("datetime.datetime", new=datetime_mock):
self.transactions._register_transaction_with_time(self.transaction_id)
# after
assert (mocked_now, self.transaction_id,)[
1
] == self.transactions._last_update_for_transactions[0][1]
def test_add_locked_tx_seller(self):
"""Test the add_locked_tx method of the Transactions class as Seller."""
# setup
datetime_mock = Mock(wraps=datetime.datetime)
mocked_now = datetime.datetime.strptime("01 01 2020 00:03", "%d %m %Y %H:%M")
datetime_mock.now.return_value = mocked_now
# operation
with patch("datetime.datetime", new=datetime_mock):
self.transactions.add_locked_tx(self.terms, FipaDialogue.Role.SELLER)
# after
assert (mocked_now, self.terms.id,)[
1
] == self.transactions._last_update_for_transactions[0][1]
assert self.transactions._locked_txs[self.terms.id] == self.terms
assert self.transactions._locked_txs_as_seller[self.terms.id] == self.terms
assert self.terms.id not in self.transactions._locked_txs_as_buyer
def test_add_locked_tx_buyer(self):
"""Test the add_locked_tx method of the Transactions class as Seller."""
# setup
datetime_mock = Mock(wraps=datetime.datetime)
mocked_now = datetime.datetime.strptime("01 01 2020 00:03", "%d %m %Y %H:%M")
datetime_mock.now.return_value = mocked_now
# operation
with patch("datetime.datetime", new=datetime_mock):
self.transactions.add_locked_tx(self.terms, FipaDialogue.Role.BUYER)
# after
assert (mocked_now, self.terms.id,)[
1
] == self.transactions._last_update_for_transactions[0][1]
assert self.transactions._locked_txs[self.terms.id] == self.terms
assert self.transactions._locked_txs_as_buyer[self.terms.id] == self.terms
assert self.terms.id not in self.transactions._locked_txs_as_seller
def test_add_locked_tx_fails(self):
"""Test the add_locked_tx method of the Transactions class where transaction_id IS in _locked_txs."""
# setup
self.transactions._locked_txs[self.terms.id] = self.terms
datetime_mock = Mock(wraps=datetime.datetime)
mocked_now = datetime.datetime.strptime("01 01 2020 00:03", "%d %m %Y %H:%M")
datetime_mock.now.return_value = mocked_now
# operation
with patch("datetime.datetime", new=datetime_mock):
with pytest.raises(
AEAEnforceError,
match="This transaction is already a locked transaction.",
):
self.transactions.add_locked_tx(self.terms, FipaDialogue.Role.BUYER)
# after
assert (
mocked_now,
self.terms.id,
) not in self.transactions._last_update_for_transactions
assert self.terms.id not in self.transactions._locked_txs_as_buyer
assert self.terms.id not in self.transactions._locked_txs_as_seller
def test_pop_locked_tx(self):
"""Test the pop_locked_tx method of the Transactions class."""
# setup
self.transactions.add_locked_tx(self.terms, FipaDialogue.Role.BUYER)
# before
assert self.terms.id in self.transactions._locked_txs
assert self.terms.id in self.transactions._locked_txs_as_buyer
assert self.terms.id not in self.transactions._locked_txs_as_seller
# operation
actual_terms = self.transactions.pop_locked_tx(self.terms)
# after
assert actual_terms == self.terms
assert self.terms.id not in self.transactions._locked_txs
assert self.terms.id not in self.transactions._locked_txs_as_buyer
assert self.terms.id not in self.transactions._locked_txs_as_seller
def test_pop_locked_tx_fails(self):
"""Test the pop_locked_tx method of the Transactions class where terms.id is NOT in _locked_txs."""
# before
assert self.terms.id not in self.transactions._locked_txs
assert self.terms.id not in self.transactions._locked_txs_as_buyer
assert self.terms.id not in self.transactions._locked_txs_as_seller
# operation
with pytest.raises(
AEAEnforceError,
match="Cannot find this transaction in the list of locked transactions.",
):
self.transactions.pop_locked_tx(self.terms)
# after
assert self.terms.id not in self.transactions._locked_txs
assert self.terms.id not in self.transactions._locked_txs_as_buyer
assert self.terms.id not in self.transactions._locked_txs_as_seller
def test_ownership_state_after_locks(self):
"""Test the ownership_state_after_locks method of the Transactions class."""
# ToDo
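        # A possible outline for this test (sketch only; the exact signature of
        # Transactions.ownership_state_after_locks is not shown in this file):
        #   1. lock a transaction via add_locked_tx(self.terms, ...)
        #   2. call ownership_state_after_locks(...)
        #   3. assert the returned ownership state reflects the locked terms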
|
py | 1a4bf9d1d591fffcffd3254fe76507683263488d | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: acquisition_network_device.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='acquisition_network_device.proto',
package='ts_mon.proto',
serialized_pb=_b('\n acquisition_network_device.proto\x12\x0cts_mon.proto\"\x95\x01\n\rNetworkDevice\x12\x11\n\talertable\x18\x65 \x01(\x08\x12\r\n\x05realm\x18\x66 \x01(\t\x12\r\n\x05metro\x18h \x01(\t\x12\x0c\n\x04role\x18i \x01(\t\x12\x10\n\x08hostname\x18j \x01(\t\x12\x11\n\thostgroup\x18l \x01(\t\" \n\x06TypeId\x12\x16\n\x0fMESSAGE_TYPE_ID\x10\xd5\x9d\x9e\x10')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_NETWORKDEVICE_TYPEID = _descriptor.EnumDescriptor(
name='TypeId',
full_name='ts_mon.proto.NetworkDevice.TypeId',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGE_TYPE_ID', index=0, number=34049749,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=168,
serialized_end=200,
)
_sym_db.RegisterEnumDescriptor(_NETWORKDEVICE_TYPEID)
_NETWORKDEVICE = _descriptor.Descriptor(
name='NetworkDevice',
full_name='ts_mon.proto.NetworkDevice',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='alertable', full_name='ts_mon.proto.NetworkDevice.alertable', index=0,
number=101, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='realm', full_name='ts_mon.proto.NetworkDevice.realm', index=1,
number=102, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metro', full_name='ts_mon.proto.NetworkDevice.metro', index=2,
number=104, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='role', full_name='ts_mon.proto.NetworkDevice.role', index=3,
number=105, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hostname', full_name='ts_mon.proto.NetworkDevice.hostname', index=4,
number=106, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hostgroup', full_name='ts_mon.proto.NetworkDevice.hostgroup', index=5,
number=108, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_NETWORKDEVICE_TYPEID,
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=200,
)
_NETWORKDEVICE_TYPEID.containing_type = _NETWORKDEVICE
DESCRIPTOR.message_types_by_name['NetworkDevice'] = _NETWORKDEVICE
NetworkDevice = _reflection.GeneratedProtocolMessageType('NetworkDevice', (_message.Message,), dict(
DESCRIPTOR = _NETWORKDEVICE,
__module__ = 'acquisition_network_device_pb2'
# @@protoc_insertion_point(class_scope:ts_mon.proto.NetworkDevice)
))
_sym_db.RegisterMessage(NetworkDevice)
# @@protoc_insertion_point(module_scope)
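# Example usage of the generated message (sketch; requires the google.protobuf
# runtime that this module already imports):
#
#   device = NetworkDevice(hostname="switch-01", metro="ams", role="core")
#   payload = device.SerializeToString()
#   parsed = NetworkDevice()
#   parsed.ParseFromString(payload)
#   assert parsed.hostname == "switch-01"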
|
py | 1a4bfafd4cb2881270c552170c005b9893f9c6fe | #
# Copyright (C) 2019 Luca Pasqualini
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import packages
import logging
import numpy
import tensorflow
# Import usienarl
from usienarl import Agent, Interface, SpaceType
from usienarl.models import DeepDeterministicPolicyGradient
class RandomAgent(Agent):
"""
    Random agent taking random actions in all modes.
It does not require warm-up.
Note: no save/restore is ever performed by this agent.
"""
def __init__(self,
name: str):
# Generate base agent
super(RandomAgent, self).__init__(name)
def setup(self,
logger: logging.Logger,
scope: str,
parallel: int,
observation_space_type: SpaceType, observation_space_shape: (),
agent_action_space_type: SpaceType, agent_action_space_shape: (),
summary_path: str = None, save_path: str = None, saves_to_keep: int = 0) -> bool:
# Make sure parameters are correct
assert(parallel > 0)
logger.info("Setup of agent " + self._name + " with scope " + scope + "...")
# Reset agent attributes
self._scope = scope
self._parallel = parallel
self._observation_space_type: SpaceType = observation_space_type
self._observation_space_shape = observation_space_shape
self._agent_action_space_type: SpaceType = agent_action_space_type
self._agent_action_space_shape = agent_action_space_shape
# Use a blank generate method
if not self._generate(logger,
observation_space_type, observation_space_shape,
agent_action_space_type, agent_action_space_shape):
return False
# Validate setup
return True
def restore(self,
logger: logging.Logger,
session,
path: str) -> bool:
return True
def save(self,
logger: logging.Logger,
session):
pass
def _generate(self,
logger: logging.Logger,
observation_space_type: SpaceType, observation_space_shape: (),
agent_action_space_type: SpaceType, agent_action_space_shape: ()) -> bool:
return True
def initialize(self,
logger: logging.Logger,
session):
pass
def act_warmup(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
warmup_step: int, warmup_episode: int):
pass
def act_train(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
train_step: int, train_episode: int):
# Act randomly
return interface.sample_agent_action(logger, session)
def act_inference(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
inference_step: int, inference_episode: int):
# Act randomly
return interface.sample_agent_action(logger, session)
def complete_step_warmup(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
agent_action: numpy.ndarray,
reward: numpy.ndarray,
episode_done: numpy.ndarray,
agent_observation_next: numpy.ndarray,
warmup_step: int, warmup_episode: int):
pass
def complete_step_train(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
agent_action: numpy.ndarray,
reward: numpy.ndarray,
episode_done: numpy.ndarray,
agent_observation_next: numpy.ndarray,
train_step: int, train_episode: int):
pass
def complete_step_inference(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
agent_action: numpy.ndarray,
reward: numpy.ndarray,
episode_done: numpy.ndarray,
agent_observation_next: numpy.ndarray,
inference_step: int, inference_episode: int):
pass
def complete_episode_warmup(self,
logger: logging.Logger,
session,
interface: Interface,
last_step_reward: numpy.ndarray,
episode_total_reward: numpy.ndarray,
warmup_step: int, warmup_episode: int):
pass
def complete_episode_train(self,
logger: logging.Logger,
session,
interface: Interface,
last_step_reward: numpy.ndarray,
episode_total_reward: numpy.ndarray,
train_step: int, train_episode: int):
pass
def complete_episode_inference(self,
logger: logging.Logger,
session,
interface: Interface,
last_step_reward: numpy.ndarray,
episode_total_reward: numpy.ndarray,
inference_step: int, inference_episode: int):
pass
@property
def saved_variables(self):
return None
@property
def warmup_steps(self) -> int:
return 0
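# Example wiring (sketch; the SpaceType member names and the logger/session/
# interface objects below come from the surrounding usienarl experiment setup
# and are assumptions here):
#
#   agent = RandomAgent(name="random_agent")
#   agent.setup(logger, scope="experiment", parallel=1,
#               observation_space_type=SpaceType.continuous,
#               observation_space_shape=(4,),
#               agent_action_space_type=SpaceType.discrete,
#               agent_action_space_shape=(2,))
#   action = agent.act_train(logger, session, interface, observation, 0, 0)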
|
py | 1a4bfc87289f5eb82c0efe5c8300e752a0cd24f6 | import re as _re
import numpy as _np
import copy as _copy
import gdspy as _gdspy
import os as _os
import time as _time
from . import utils
from . import geometry
_PRINT_LIT_REDUCTION = False
class CallTree:
_operators = [['make'], ['.'], ['^'], ['*', '/'], ['-', '+'], ['pstart', 'pend'],
['psep'], ['.'], ['='], [',']]
def __init__(self, root, text=""):
self._root = root
self._children = []
self._names = {}
self._func = ""
if text:
nodeStack = [self, ]
textbuffer = ""
strDelimiter = ''
for i, c in enumerate(text):
if strDelimiter and c != strDelimiter:
textbuffer += c
elif strDelimiter and c == strDelimiter:
textbuffer += c
strDelimiter = ''
elif c in ['"', "'"]:
strDelimiter = c
textbuffer += c
elif c == "(":
top = nodeStack[-1]
new = CallTree(root)
m = _re.search("[^a-zA-Z0-9_]", textbuffer[::-1])
if m:
new._func = textbuffer[len(textbuffer)-m.start():]
top._addText(textbuffer[:len(textbuffer)-m.start()])
else:
new._func = textbuffer
top._children.append(new)
nodeStack.append(new)
textbuffer = ""
elif c == ")":
nodeStack[-1]._addText(textbuffer)
nodeStack.pop()
textbuffer = ""
else:
textbuffer += c
if len(nodeStack) == 0:
raise ValueError("Additional ')' at:\n'"+utils.shortenText(text[i-30:i+30], maxLength=1e99)+"'")
if len(nodeStack) > 1:
raise ValueError("Additional '('.")
def _addText(self, text):
text = text.strip()
if len(text) > 0:
self._children.append(text.strip())
def _py2lit(self, *vals):
res = []
for val in vals:
if type(val) is list and type(val[0]) is str:
res.append(val)
elif type(val) is float:
res.append(['float', val])
elif type(val) is int:
res.append(['int', val])
elif type(val) is list and len(val) == 2:
res.append(['point', val])
elif type(val) is str:
res.append(['string', val])
elif isinstance(val, geometry.Shape):
res.append(['shape', val])
else:
raise ValueError("Uknown variable type '"+str(type(val))+"'")
if len(vals) == 1:
return res[0]
return res
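    # For example: _py2lit(3.5) -> ['float', 3.5], _py2lit([1, 2]) ->
    # ['point', [1, 2]] and _py2lit("a") -> ['string', 'a'].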
def createLiterals(self):
for child in self._children:
if type(child) is CallTree:
child.createLiterals()
#=====================================================================
# generate literals from text
i = 0
inPoint = False
while i < len(self._children):
if type(self._children[i]) is str:
i, inPoint = self._parseStr(i, inPoint)
else:
i += 1
#=====================================================================
# accumulate children
i = 0
while i < len(self._children)-1:
if (hasattr(self._children[i], "_literals")
and hasattr(self._children[i+1], "_literals")):
self._children[i]._literals += self._children.pop(i+1)._literals
else:
i += 1
def _instanciateShape(self, obj, largs, dargs):
tree = obj['tree']
argdict = {k: None for k in obj['args']}
if len(largs) > len(argdict):
raise ValueError("Too many list args in parametric shape call: '{}'.".format(self._func))
if len(argdict) > 0:
for targetKey, listArg in zip(obj['args'], largs):
argdict[targetKey] = self._py2lit(listArg)
for key, val in dargs.items():
if argdict[key] is None:
argdict[key] = self._py2lit(val)
else:
raise ValueError("Argument specified by list arg and named arg in parametric shape call: '{}'.".format(self._func))
if None in argdict.values():
raise ValueError("To few arguements in parametric shape call: '{}'.".format(self._func))
unresolvedNames = tree.resolveNames(argdict)
if unresolvedNames:
raise ValueError("Unresolved names "
+", ".join(['"'+s+'"' for s in unresolvedNames])
+" in imported parameteric shape call: ".format(self._func))
tree.evaluate()
return tree.getShape()
def evaluate(self):
for child in self._children:
if type(child) is CallTree:
child.evaluate()
#=====================================================================
# accumulate children
i = 0
while i < len(self._children)-1:
if (hasattr(self._children[i], "_literals")
and hasattr(self._children[i+1], "_literals")):
self._children[i]._literals += self._children.pop(i+1)._literals
else:
i += 1
#=====================================================================
# reduce literals
if len(self._children) > 1:
raise ValueError("Fatal error: children without literals not allowed.")
self.resolveNames({})
self._reduceLiterals()
#=====================================================================
# prepare function parsing
if self._func == "":
if len(self._children) == 1:
self._literals = [self._result]
else:
unresolvedNames = []
largs = []
dargs = {}
# multiple arguments
if self._result[0] == "argumentlist":
for lit in self._result[1]:
if lit[0] == 'assignment':
if lit[1][1][0] == 'name':
unresolvedNames.append(lit[1][1][1])
dargs[lit[1][0]] = lit[1][1][1]
else:
if lit[0] == 'name':
unresolvedNames.append(lit[1])
largs.append(lit[1])
# only one argument
elif self._result[0] != "none":
if self._result[0] == 'name':
unresolvedNames.append(self._result[1])
largs = [self._result[1]]
dargs = {}
def requireResolvedNamesOnly():
if unresolvedNames:
raise ValueError('Unresolved name(s): '
+', '.join(['"'+s+'"' for s in unresolvedNames])
+' in argumentlist of func "{}".'.format(self._func))
if _PRINT_LIT_REDUCTION:
utils.debug('Evaluate function "'+self._func+'", largs='
+str(largs)+', dargs='+str(dargs))
#=====================================================================
# rect function
if self._func == "rect":
requireResolvedNamesOnly()
self._literals = [['shape', geometry.Rect(*largs, **dargs)]]
#=====================================================================
# polygon function
elif self._func == "polygon":
requireResolvedNamesOnly()
self._literals = [['shape', geometry.Polygon(*largs, **dargs)]]
#=====================================================================
# text function
elif self._func == "text":
requireResolvedNamesOnly()
self._literals = [['shape', geometry.Text(*largs, **dargs)]]
#=====================================================================
# translate function
elif self._func == "translate":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape", "point", "shaperef"])
+geometry.Translator(*largs, **dargs)]]
#=====================================================================
# rotate function
elif self._func == "rotate":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape", "point", "shaperef"])
+geometry.Rotator(*largs, **dargs)]]
#=====================================================================
# mirror function
elif self._func == "mirror":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape"])
+geometry.Mirrower(*largs, **dargs)]]
#=====================================================================
# grow function
elif self._func == "grow":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck("shape")
+geometry.Grower(*largs, **dargs)]]
#=====================================================================
# smooth function
elif self._func == "round":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck("shape")
+geometry.Rounder(*largs, **dargs)]]
#=====================================================================
# create array of shapes
elif self._func == "array":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape", "shaperef"])
+geometry.Arrayer(*largs, **dargs)]]
#=====================================================================
# multiple calls to parametric shapes
elif self._func == "call":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(['name', 'tree'], returnType='raw')
+utils.Caller(self._root, *largs, **dargs)]]
#=====================================================================
# cast float to int
elif self._func == "int":
requireResolvedNamesOnly()
if len(dargs) > 0 or len(largs) != 1:
raise ValueError("Invalid arguments to 'int' call.")
self._literals = [['int', int(largs[0])]]
#=====================================================================
# absolute
elif self._func == "abs":
requireResolvedNamesOnly()
if len(dargs) > 0 or len(largs) != 1:
raise ValueError("Invalid arguments to 'abs' call.")
self._literals = [['float', abs(largs[0])]]
#=====================================================================
# create letter from number
elif self._func == "char":
requireResolvedNamesOnly()
letters = "abcdefghijklmnopqrstuvwxyz"
            if len(dargs) > 0 or len(largs) != 1 or largs[0] >= len(letters):
raise ValueError("Invalid arguments to 'char' call.")
self._literals = [['string', letters[int(largs[0])]]]
#=====================================================================
# min/max/mean functions
elif self._func in ["min", "max", "mean"]:
requireResolvedNamesOnly()
if len(dargs) > 0:
raise ValueError("Function '"+self._func+"' does not support named arguments.")
if len(largs) == 0:
raise ValueError("Function '"+self._func+"' needs more than one argument.")
try:
largs = [float(f) for f in largs]
except:
raise ValueError("Function '"+self._func+"' supports only numerical inputs.")
fdict = {"min": min, "max": max, "mean": lambda l: sum(l)/len(l)}
self._literals = [['float', fdict[self._func](largs)]]
#=====================================================================
# trigonometric functions
elif self._func in ["cos", "sin", "tan", "asin", "acos", "atan"]:
requireResolvedNamesOnly()
if len(largs) != 1 or any([a not in ['unit'] for a in dargs]):
raise ValueError("Invalid arguments to 'cos' function.")
u = dargs.get('unit', 'deg')
if u == 'deg':
largs[0] *= _np.pi/180
elif u == 'rad':
pass
else:
raise ValueError("Invalid value for 'unit' argument in 'cos' function.")
if self._func == "sin":
self._literals = [['float', _np.sin(largs[0])]]
elif self._func == "cos":
self._literals = [['float', _np.cos(largs[0])]]
elif self._func == "tan":
self._literals = [['float', _np.tan(largs[0])]]
elif self._func == "asin":
self._literals = [['float', 180/_np.pi*_np.arcsin(largs[0])]]
elif self._func == "acos":
self._literals = [['float', 180/_np.pi*_np.arccos(largs[0])]]
else:
self._literals = [['float', 180/_np.pi*_np.arctan(largs[0])]]
#=====================================================================
# arctan2
elif self._func == "atan2":
requireResolvedNamesOnly()
if len(dargs) > 0 or len(largs) != 2:
raise ValueError("Invalid arguments to 'abs' call.")
self._literals = [['float', 180/_np.pi*_np.arctan2(largs[0], largs[1])]]
#=====================================================================
# calculate height of shape
elif self._func == "height":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'height' function.")
self._literals = [['float', largs[0].height()]]
#=====================================================================
# calculate width of shape
elif self._func == "width":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'width' function.")
self._literals = [['float', largs[0].width()]]
#=====================================================================
# calculate bounding box
elif self._func == "bb":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'bb' function.")
self._literals = [['shape', largs[0].boundingBox()]]
#=====================================================================
# calculate center of mass
elif self._func == "center":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'center' function.")
self._literals = [['point', largs[0].center()]]
#=====================================================================
# instanciate shapes
elif self._func in self._root.shapeDict:
requireResolvedNamesOnly()
obj = _copy.deepcopy(self._root.shapeDict[self._func])
shape = self._instanciateShape(obj, largs, dargs)
utils.debug('self._literals = ["shape", '+str(shape)+']')
self._literals = [['shape', shape]]
#=====================================================================
# look in imported database
elif self._func in [name for lib in self._root.importDict.values()
for name in lib.shapeDict.keys()]:
self._literals = [['import', self._func, [largs, dargs]]]
#=====================================================================
# create symbol reference:
elif self._func == 'ref':
if len(largs) == 1:
self._literals = [['shaperef', _gdspy.CellReference(self._root.gdsLib.cells[largs[0]])]]
elif len(largs) > 1:
if largs[0] not in self._root.paramSymDict:
raise ValueError('Parametric symbol "'+str(largs[0])+'" was not defined. '
+'(Symbols may only be used after their definition)')
paramSym = self._root.paramSymDict[largs[0]]
symParams = largs[1:]
self._literals = [['paramshaperef', paramSym], ['operator', 'make'],
['argumentlist',
[['name', p] if type(p) is str
else self._py2lit(p) for p in symParams]]]
else:
raise ValueError("Invalid function/shape '{}'.".format(self._func))
if _PRINT_LIT_REDUCTION:
utils.debug('Evaluation result: ['+', '.join(['['+l[0]+', '
+utils.shortenText(str(l[1]), maxLength=10)+']' for l in self._literals])+']')
def _parseStr(self, childId, inPoint=False):
#=====================================================================
# Split string in literals 'str', 'int', 'float', 'name', 'operator'
# and 'point'
appliedChange = False
s = self._children[childId]
if not hasattr(self._children[childId], "_literals"):
literals = []
strDelimiter = ''
buf = ''
inNumber = False
inName = False
s = s + ' '
for prevC, c, nextC in zip(' ' + s[:-1], s, s[1:] + ' '):
while True:
reparseChar = False
if strDelimiter:
if c == strDelimiter:
strDelimiter = ''
literals.append(['string', buf])
else:
buf += c
elif inNumber:
if _re.match('[0-9.e]', c) or c in ['+', '-'] and prevC == 'e':
buf += c
else:
n = float(buf)
if n - round(n) < 1e-6 * n:
literals.append(['int', n])
else:
literals.append(['float', n])
inNumber = False
reparseChar = True
elif inName:
if _re.match('[a-zA-Z0-9_]', c):
buf += c
else:
utils.testValidName(buf)
literals.append(['name', buf])
inName = False
reparseChar = True
else:
if c in ['"', "'"]:
strDelimiter = c
buf = ''
elif c == '[':
literals.append(['operator', 'pstart'])
inPoint = True
elif inPoint and c == ',':
literals.append(['operator', 'psep'])
elif c == ']':
literals.append(['operator', 'pend'])
inPoint = False
elif _re.match('[0-9]', c) or c == '.' and _re.match('[0-9]', nextC):
reparseChar = True
inNumber = True
buf = ''
elif c in [op for ops in self._operators for op in ops]:
literals.append(['operator', c])
elif _re.match('[a-zA-Z_]', c):
reparseChar = True
inName = True
buf = ''
                        elif _re.match(r'\s', c):
pass
else:
raise ValueError("Unexpected character '{}'".format(c))
if not reparseChar:
break
self._children[childId] = CallTree(self._root)
self._children[childId]._literals = literals
return childId + 1, inPoint
def _reduceLiterals(self):
if hasattr(self, '_result'):
return
if _PRINT_LIT_REDUCTION:
utils.debug("Start reducing:")
utils.debug()
if len(self._children) == 0:
self._result = ['none', None]
return
literals = self._children[0]._literals
for ops in self._operators:
i = 0
#=====================================================================
# helper functions
def popNextLit():
if i < len(literals) - 1:
return literals.pop(i+1)
else:
return None
def popPrevLit():
nonlocal i
if i > 0:
i -= 1
return literals.pop(i)
else:
return None
def viewNextLit():
if i < len(literals) - 1:
return literals[i+1]
else:
return None
def viewPrevLit():
if i > 0:
return literals[i-1]
else:
return None
def isNextLitType(types):
if i < len(literals) - 1:
lit = literals[i+1]
else:
return False
if type(types) is list:
return lit != None and lit[0] in types
else:
return lit != None and lit[0] == types
def isPrevLitType(types):
if i > 0:
lit = literals[i-1]
else:
return False
if type(types) is list:
return lit[0] in types
else:
return lit[0] == types
#=====================================================================
# evaluate operators
while i < len(literals):
l = literals[i]
if l[0] == 'tree':
self.resolveNames({})
elif l[0] == 'operator' and l[1] in ops:
if _PRINT_LIT_REDUCTION:
utils.debug(literals)
#=====================================================================
# two scalar numeric operands
if (l[1] in ['^', '*', '/', '+', '-']
and isNextLitType(['float', 'int'])
and isPrevLitType(['float', 'int'])):
op1 = popPrevLit()
op2 = popNextLit()
if l[1] == '^':
                            if 'float' in [op1[0], op2[0]] and op2[1] > 0:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, pow(op1[1], op2[1])]
elif l[1] == '*':
                            if 'float' in [op1[0], op2[0]]:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, op1[1] * op2[1]]
elif l[1] == '/':
literals[i] = ['float', op1[1]/op2[1]]
elif l[1] == '+':
                            if 'float' in [op1[0], op2[0]]:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, op1[1] + op2[1]]
elif l[1] == '-':
                            if 'float' in [op1[0], op2[0]]:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, op1[1] - op2[1]]
#=====================================================================
# plus and minus for points
elif (l[1] in ['+', '-'] and isNextLitType('point')
and isPrevLitType('point')):
op1 = popPrevLit()
op2 = popNextLit()
if l[1] == '+':
                            literals[i] = ['point', [p1 + p2 for p1, p2 in zip(op1[1], op2[1])]]
elif l[1] == '-':
                            literals[i] = ['point', [p1 - p2 for p1, p2 in zip(op1[1], op2[1])]]
#=====================================================================
# plus operator for strings
elif l[1] == '+' and (isNextLitType('string')
and not isPrevLitType('name')
or (isPrevLitType('string'))
and not isNextLitType('name')):
op1 = popPrevLit()
op2 = popNextLit()
if op1[0] == 'int':
op1[1] = str(int(op1[1]))
else:
op1[1] = str(op1[1])
if op2[0] == 'int':
op2[1] = str(int(op2[1]))
else:
op2[1] = str(op2[1])
literals[i] = ['string', op1[1] + op2[1]]
#=====================================================================
# plus and minus as unary operators for numbers
elif l[1] in ['+', '-'] and isNextLitType(['float', 'int']):
op = popNextLit()
if l[1] == '+':
literals[i] = op
elif l[1] == '-':
literals[i] = [op[0], -op[1]]
#=====================================================================
# geometrical arithmetical operations
elif(l[1] in ['+', '-', '*'] and isPrevLitType('shape')
and isNextLitType('shape')):
op1 = popPrevLit()
op2 = popNextLit()
if l[1] == '+':
literals[i] = ['shape', op1[1].union(op2[1])]
elif l[1] == '-':
literals[i] = ['shape', op1[1].substract(op2[1])]
elif l[1] == '*':
literals[i] = ['shape', op1[1].intersect(op2[1])]
#=====================================================================
# point start, sep and end operators
elif l[1] == 'pstart' and isNextLitType(['float', 'int']):
op = popNextLit()
literals[i] = ["point-x", op[1]]
elif l[1] == 'psep' and isPrevLitType('point-x') and isNextLitType('point-y'):
op1 = popPrevLit()
op2 = popNextLit()
literals[i] = ["point", (op1[1], op2[1])]
elif l[1] == 'pend' and isPrevLitType(['float', 'int']):
op = popPrevLit()
literals[i] = ["point-y", op[1]]
#=====================================================================
# dot operator for imported shapes
elif(l[1] == '.' and isNextLitType('import')
and isPrevLitType('name')):
op1 = popPrevLit()
op2 = popNextLit()
largs, dargs = op2[2]
obj = _copy.deepcopy(self._root.importDict[op1[1]].shapeDict[op2[1]])
shape = self._instanciateShape(obj, largs, dargs)
utils.debug('self._literals['+str(i)+'] = ["shape", '+str(shape)+']')
literals[i] = ['shape', shape]
#=====================================================================
# dot operator for functions
elif(l[1] == '.' and isNextLitType('func')
and (viewNextLit()[1].check(viewPrevLit())
or (isPrevLitType('operator')
and viewPrevLit()[1] in ['pend', 'point-y']))):
if viewNextLit()[1].check(viewPrevLit()):
op1 = popPrevLit()
op2 = popNextLit()
literals[i] = op2[1](op1)
#=====================================================================
# argument list operator
elif l[1] == ',':
op1 = popPrevLit()
op2 = popNextLit()
if op1 is None:
l1 = []
elif op1[0] == 'argumentlist':
l1 = op1[1]
else:
l1 = [list(op1)]
if op2 is None:
l2 = []
elif op2[0] == 'argumentlist':
l2 = op2[1]
else:
l2 = [list(op2)]
literals[i] = ['argumentlist', l1+l2]
#=====================================================================
# assignment operator
elif l[1] == '=' and isPrevLitType('name'):
op1 = popPrevLit()
op2 = popNextLit()
literals[i] = ['assignment', [op1[1], op2]]
#=====================================================================
# make operator that creates shape refs
elif (l[1] == 'make'
and isPrevLitType('paramshaperef')
and isNextLitType('argumentlist')):
op1 = popPrevLit()
op2 = popNextLit()
paramSym = op1[1]
symParams = [v[1] for v in op2[1]]
utils.debug("symbol name pattern:", paramSym[0]['name_pattern'],
"params:", symParams)
symInstanceName = paramSym[0]['name_pattern'].format(*symParams)
if symInstanceName in self._root.gdsLib.cells.keys():
sym = self._root.gdsLib.cells[symInstanceName]
else:
_gdspy.current_library = self._root.gdsLib
sym = _gdspy.Cell(symInstanceName)
self._root.gdsLib.add(sym)
if len(list(sym)) == 0:
for section in paramSym:
tree = _copy.deepcopy(section['tree'])
# replace root reference with true reference:
tree._root = section['tree']._root
layer = section['layer']
argNames = section['args']
argdict = {k: self._py2lit(v) for k, v in zip(argNames, symParams)}
unresolvedNames = tree.resolveNames(argdict)
tree.evaluate()
if tree._result[0] != 'none':
shapeResult = False
try:
s = tree.getShape()
shapeResult = True
except ValueError:
refs = tree.getShaperef()
if shapeResult:
if s is None:
if unresolvedNames:
raise ValueError("Unresolved name(s) in layer shapes: "
+", ".join(['"'+n+'"' for n in unresolvedNames]))
else:
raise ValueError("Unexpected 'None'-shape found after instanciation "
+"of parametric symbol:\n"+str(tree))
shape = s._shape
if not shape is None:
if hasattr(shape, "layer"):
shape.layer = layer
elif hasattr(shape, "layers"):
shape.layers = [layer for _ in range(len(shape.layers))]
sym.add(shape)
else:
for ref in refs:
sym.add(ref)
# add created sym to all parents
                        # TODO: it would probably be better to use the 'importSymbols' of
# the PlsScript instance just before 'write_gds' is called.
# Otherwise layer transformation will not work, also the
                        # 'parent' attribute is unnecessary, we have importDict
# already...
parent = self._root.parent
while parent is not None:
_gdspy.current_library = parent.gdsLib
if sym.name not in parent.gdsLib:
parent.gdsLib.add(sym)
parent = parent.parent
_gdspy.current_library = self._root.gdsLib
literals[i] = ['shaperef', _gdspy.CellReference(sym)]
else:
if viewPrevLit():
t1 = viewPrevLit()
else:
t1 = 'None'
if viewNextLit():
t2 = viewNextLit()
else:
t2 = 'None'
if _PRINT_LIT_REDUCTION:
utils.debug("parsing paused...")
utils.debug()
raise ValueError("Illegal operands for operator '{}': {} and {}".format(l[1], t1, t2))
if _PRINT_LIT_REDUCTION:
utils.debug("applied operator:", l[1])
utils.debug()
i += 1
if _PRINT_LIT_REDUCTION:
utils.debug(literals)
utils.debug("Done reducing.")
utils.debug()
if (len(self._children[0]._literals) > 1
and not all([lit[0] == 'shaperef'
for lit in self._children[0]._literals])
and not any([lit[0] == 'paramshaperef'
for lit in self._children[0]._literals])):
raise ValueError("Syntax error.")
if len(self._children[0]._literals) == 1 and self._children[0]._literals[0][0] != 'shaperef':
self._result = self._children[0]._literals[0]
else:
self._result = self._children[0]._literals
def resolveNames(self, names):
unresolvedNames = []
# magic names:
names["__FILENAME__"] = ["string", _re.sub('\..*$', '', _os.path.basename(self._root.path))]
names["__HASH__"] = ["string", self._root.hash]
names["__DATE__"] = ["string", _time.strftime("%d.%m.%Y")]
names["__TIME__"] = ["string", _time.strftime("%H:%M")]
# constants:
names["True"] = ['int', 1]
names["False"] = ['int', 0]
for child in self._children:
if type(child) is CallTree:
child.resolveNames(names)
def resolveArglist(lit):
unresolvedNames = []
if lit[0] == 'argumentlist':
for i, sublit in enumerate(lit[1]):
if sublit[0] == 'assignment':
unresolvedNames.extend(resolveArglist(sublit[1][1]))
elif sublit[0] == 'name':
unresolvedNames.extend(resolveArglist(sublit))
elif lit[0] == 'name':
if lit[1] in names:
lit[0] = names[lit[1]][0]
lit[1] = _copy.deepcopy(names[lit[1]][1])
else:
unresolvedNames.append(names)
return unresolvedNames
if hasattr(self, '_result'):
unresolvedNames.extend(resolveArglist(self._result))
if hasattr(self, '_literals'):
for literal in self._literals:
if literal[0] == 'name':
if literal[1] in names:
literal[0] = names[literal[1]][0]
literal[1] = _copy.deepcopy(names[literal[1]][1])
else:
unresolvedNames.append(literal[1])
elif literal[0] == 'tree':
unresolvedNames.extend(literal[1]['tree'].resolveNames(names))
for name in names:
if name in literal[1]['args']:
literal[1]['args'].delete(name)
if len(literal[1]['args']) == 0:
literal[1]['tree'].evaluate()
utils.debug('Replacing: '+str(literal[1]['tree'])+' -> ["shape", '
+literal[1]['tree'].getShape()+ ']')
literal[0] = 'shape'
literal[1] = _copy.deepcopy(literal[1]['tree'].getShape())
else:
unresolvedNames.extend(resolveArglist(literal))
return unresolvedNames
def getShape(self, ref=False):
utils.debug('getShape() called:')
if hasattr(self, "_literals"):
utils.debug(' > self._literals = '+str(self._literals))
else:
utils.debug(' > self._literals = <undefined>')
if hasattr(self, "_result"):
utils.debug(' > self._result = '+str(self._result))
else:
utils.debug(' > self._result = <undefined>')
if hasattr(self, "_result"):
if ref:
if not all([r[0]=='shaperef' for r in self._result]):
raise ValueError('Expected only "shaperef" types but found: '+str(self._result))
return [r[1] for r in self._result]
else:
if self._result[0] != 'shape':
raise ValueError('Expected "shape" type result but found: '+str(self._result))
return self._result[1]
return None
def getShaperef(self):
return self.getShape(ref=True)
def __str__(self):
return self._strRec()
def __repr__(self):
return self._strRec()
def _strRec(self, level=0):
if hasattr(self, "_literals"):
hasLits = "'yes'"
else:
hasLits = "'no'"
if hasattr(self, "_result"):
hasRes = "'yes'"
else:
hasRes = "'no'"
result = (" "*level + "<CallTree object; func='"
+ self._func+"'; literals? "
+ hasLits+"; result? "
+ hasRes+">\n")
for child in self._children:
if type(child) is str:
result += " "*(level+1) + "'" + _re.sub("\s+", " ", child.strip()) + "'\n"
else:
result += child._strRec(level+1)
return result
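# Typical lifecycle of a CallTree (sketch; `root` is the owning script object,
# assumed to provide shapeDict, importDict, gdsLib, parent, path and hash, and
# the rect(...) arguments below are purely illustrative):
#
#   tree = CallTree(root, "rect(width, height)")
#   tree.createLiterals()
#   tree.resolveNames({"width": ["float", 10.0], "height": ["float", 20.0]})
#   tree.evaluate()
#   shape = tree.getShape()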
|
py | 1a4bfd4cdf3af9a5965d0ee6349c2b4d03f443cd | from functools import wraps
from socket import gaierror
from time import sleep
import pandas as pd
import pytz
import requests
from bs4 import BeautifulSoup
from .exceptions import NoMatchingDataError, PaginationError
from .mappings import DOMAIN_MAPPINGS, BIDDING_ZONES, TIMEZONE_MAPPINGS, NEIGHBOURS
from .misc import year_blocks, day_blocks
from .parsers import parse_prices, parse_loads, parse_generation, \
parse_generation_per_plant, parse_installed_capacity_per_plant, \
parse_crossborder_flows, parse_imbalance_prices, parse_unavailabilities
__title__ = "entsoe-py"
__version__ = "0.2.9"
__author__ = "EnergieID.be"
__license__ = "MIT"
URL = 'https://transparency.entsoe.eu/api'
def retry(func):
"""Catches connection errors, waits and retries"""
@wraps(func)
def retry_wrapper(*args, **kwargs):
self = args[0]
error = None
for _ in range(self.retry_count):
try:
result = func(*args, **kwargs)
except (requests.ConnectionError, gaierror) as e:
error = e
print("Connection Error, retrying in {} seconds".format(self.retry_delay))
sleep(self.retry_delay)
continue
else:
return result
else:
raise error
return retry_wrapper
class EntsoeRawClient:
"""
Client to perform API calls and return the raw responses
API-documentation: https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html#_request_methods
Attributions: Parts of the code for parsing Entsoe responses were copied
from https://github.com/tmrowco/electricitymap
"""
def __init__(self, api_key, session=None, retry_count=1, retry_delay=0,
proxies=None):
"""
Parameters
----------
api_key : str
session : requests.Session
retry_count : int
number of times to retry the call if the connection fails
retry_delay: int
amount of seconds to wait between retries
proxies : dict
requests proxies
"""
if api_key is None:
raise TypeError("API key cannot be None")
self.api_key = api_key
if session is None:
session = requests.Session()
self.session = session
self.proxies = proxies
self.retry_count = retry_count
self.retry_delay = retry_delay
@retry
def base_request(self, params, start, end):
"""
Parameters
----------
params : dict
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
requests.Response
"""
start_str = self._datetime_to_str(start)
end_str = self._datetime_to_str(end)
base_params = {
'securityToken': self.api_key,
'periodStart': start_str,
'periodEnd': end_str
}
params.update(base_params)
response = self.session.get(url=URL, params=params,
proxies=self.proxies)
try:
response.raise_for_status()
except requests.HTTPError as e:
soup = BeautifulSoup(response.text, 'html.parser')
text = soup.find_all('text')
if len(text):
error_text = soup.find('text').text
if 'No matching data found' in error_text:
raise NoMatchingDataError
elif 'amount of requested data exceeds allowed limit' in error_text:
requested = error_text.split(' ')[-2]
raise PaginationError(
f"The API is limited to 200 elements per request. This query requested for {requested} documents and cannot be fulfilled as is.")
raise e
else:
return response
@staticmethod
def _datetime_to_str(dtm):
"""
Convert a datetime object to a string in UTC
of the form YYYYMMDDhh00
Parameters
----------
dtm : pd.Timestamp
Recommended to use a timezone-aware object!
If timezone-naive, UTC is assumed
Returns
-------
str
"""
if dtm.tzinfo is not None and dtm.tzinfo != pytz.UTC:
dtm = dtm.tz_convert("UTC")
fmt = '%Y%m%d%H00'
ret_str = dtm.strftime(fmt)
return ret_str
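    # Example: pd.Timestamp("2021-01-01 00:00", tz="Europe/Brussels") is first
    # converted to UTC (2020-12-31 23:00) and rendered as "202012312300".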
def query_day_ahead_prices(self, country_code, start, end):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
str
"""
domain = BIDDING_ZONES[country_code]
params = {
'documentType': 'A44',
'in_Domain': domain,
'out_Domain': domain
}
response = self.base_request(params=params, start=start, end=end)
return response.text
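    # Example (sketch, assuming a valid security token and that "BE" is a
    # configured bidding zone):
    #   client = EntsoeRawClient(api_key="YOUR_TOKEN")
    #   start = pd.Timestamp("20210101", tz="Europe/Brussels")
    #   end = pd.Timestamp("20210102", tz="Europe/Brussels")
    #   xml_text = client.query_day_ahead_prices("BE", start, end)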
def query_load(self, country_code, start, end):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
str
"""
domain = BIDDING_ZONES[country_code]
params = {
'documentType': 'A65',
'processType': 'A16',
'outBiddingZone_Domain': domain,
'out_Domain': domain
}
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_load_forecast(self, country_code, start, end):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
str
"""
domain = BIDDING_ZONES[country_code]
params = {
'documentType': 'A65',
'processType': 'A01',
'outBiddingZone_Domain': domain,
# 'out_Domain': domain
}
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_generation_forecast(self, country_code, start, end):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
str
"""
domain = BIDDING_ZONES[country_code]
params = {
'documentType': 'A71',
'processType': 'A01',
'in_Domain': domain,
}
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_wind_and_solar_forecast(self, country_code, start, end, psr_type=None, lookup_bzones=False):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter on a single psr type
lookup_bzones : bool
if True, country_code is expected to be a bidding zone
Returns
-------
str
"""
if not lookup_bzones:
domain = DOMAIN_MAPPINGS[country_code]
else:
domain = BIDDING_ZONES[country_code]
params = {
'documentType': 'A69',
'processType': 'A01',
'in_Domain': domain,
}
if psr_type:
params.update({'psrType': psr_type})
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_generation(self, country_code, start, end, psr_type=None, lookup_bzones=False):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter on a single psr type
lookup_bzones : bool
if True, country_code is expected to be a bidding zone
Returns
-------
str
"""
if not lookup_bzones:
domain = DOMAIN_MAPPINGS[country_code]
else:
domain = BIDDING_ZONES[country_code]
params = {
'documentType': 'A75',
'processType': 'A16',
'in_Domain': domain,
}
if psr_type:
params.update({'psrType': psr_type})
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_generation_per_plant(self, country_code, start, end, psr_type=None, lookup_bzones=False):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter on a single psr type
lookup_bzones : bool
if True, country_code is expected to be a bidding zone
Returns
-------
str
"""
if not lookup_bzones:
domain = DOMAIN_MAPPINGS[country_code]
else:
domain = BIDDING_ZONES[country_code]
params = {
'documentType': 'A73',
'processType': 'A16',
'in_Domain': domain,
}
if psr_type:
params.update({'psrType': psr_type})
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_installed_generation_capacity(self, country_code, start, end, psr_type=None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter query for a specific psr type
Returns
-------
str
"""
domain = DOMAIN_MAPPINGS[country_code]
params = {
'documentType': 'A68',
'processType': 'A33',
'in_Domain': domain,
}
if psr_type:
params.update({'psrType': psr_type})
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_installed_generation_capacity_per_unit(self, country_code,
start, end, psr_type=None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter query for a specific psr type
Returns
-------
str
"""
domain = DOMAIN_MAPPINGS[country_code]
params = {
'documentType': 'A71',
'processType': 'A33',
'in_Domain': domain,
}
if psr_type:
params.update({'psrType': psr_type})
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_crossborder_flows(self, country_code_from, country_code_to, start, end, lookup_bzones=False):
"""
Parameters
----------
country_code_from : str
country_code_to : str
start : pd.Timestamp
end : pd.Timestamp
lookup_bzones : bool
if True, country_code is expected to be a bidding zone
Returns
-------
str
"""
if not lookup_bzones:
domain_in = DOMAIN_MAPPINGS[country_code_to]
domain_out = DOMAIN_MAPPINGS[country_code_from]
else:
domain_in = BIDDING_ZONES[country_code_to]
domain_out = BIDDING_ZONES[country_code_from]
params = {
'documentType': 'A11',
'in_Domain': domain_in,
'out_Domain': domain_out
}
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_imbalance_prices(self, country_code, start, end, psr_type=None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter query for a specific psr type
Returns
-------
str
"""
domain = DOMAIN_MAPPINGS[country_code]
params = {
'documentType': 'A85',
'controlArea_Domain': domain,
}
if psr_type:
params.update({'psrType': psr_type})
response = self.base_request(params=params, start=start, end=end)
return response.text
def query_unavailability(self, country_code, start, end,
doctype, docstatus=None, periodstartupdate = None,
periodendupdate = None) -> bytes:
"""
        Generic unavailability query method.
This endpoint serves ZIP files.
The query is limited to 200 items per request.
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
doctype : str
docstatus : str, optional
        periodstartupdate : pd.Timestamp, optional
        periodendupdate : pd.Timestamp, optional
Returns
-------
bytes
"""
domain = BIDDING_ZONES[country_code]
params = {
'documentType': doctype,
'biddingZone_domain': domain
# ,'businessType': 'A53 (unplanned) | A54 (planned)'
}
if docstatus:
params['docStatus'] = docstatus
if periodstartupdate and periodendupdate:
params['periodStartUpdate'] = self._datetime_to_str(periodstartupdate)
params['periodEndUpdate'] = self._datetime_to_str(periodendupdate)
response = self.base_request(params=params, start=start, end=end)
return response.content
def query_unavailability_of_generation_units(self, country_code, start, end,
docstatus=None, periodstartupdate = None,
periodendupdate = None) -> bytes:
"""
This endpoint serves ZIP files.
The query is limited to 200 items per request.
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
docstatus : str, optional
        periodstartupdate : pd.Timestamp, optional
        periodendupdate : pd.Timestamp, optional
Returns
-------
bytes
"""
content = self.query_unavailability(
country_code=country_code, start=start, end=end,
doctype="A80", docstatus=docstatus,
periodstartupdate = periodstartupdate,
periodendupdate = periodendupdate)
return content
def query_unavailability_of_production_units(self, country_code, start, end,
docstatus=None, periodstartupdate = None,
periodendupdate = None) -> bytes:
"""
This endpoint serves ZIP files.
The query is limited to 200 items per request.
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
docstatus : str, optional
        periodstartupdate : pd.Timestamp, optional
        periodendupdate : pd.Timestamp, optional
Returns
-------
bytes
"""
content = self.query_unavailability(
country_code=country_code, start=start, end=end,
doctype="A77", docstatus=docstatus,
periodstartupdate = periodstartupdate,
periodendupdate = periodendupdate)
return content
def query_withdrawn_unavailability_of_generation_units(
self, country_code, start, end):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
"""
content = self.query_unavailability(
country_code=country_code, start=start, end=end,
doctype="A80", docstatus='A13')
return content
def paginated(func):
"""Catches a PaginationError, splits the requested period in two and tries
again. Finally it concatenates the results"""
@wraps(func)
def pagination_wrapper(*args, start, end, **kwargs):
try:
df = func(*args, start=start, end=end, **kwargs)
except PaginationError:
pivot = start + (end - start) / 2
df1 = pagination_wrapper(*args, start=start, end=pivot, **kwargs)
df2 = pagination_wrapper(*args, start=pivot, end=end, **kwargs)
df = pd.concat([df1, df2])
return df
return pagination_wrapper
def year_limited(func):
"""Deals with calls where you cannot query more than a year, by splitting
the call up in blocks per year"""
@wraps(func)
def year_wrapper(*args, start, end, **kwargs):
blocks = year_blocks(start, end)
frames = [func(*args, start=_start, end=_end, **kwargs) for _start, _end
in blocks]
df = pd.concat(frames)
return df
return year_wrapper
def day_limited(func):
"""Deals with calls where you cannot query more than a year, by splitting
the call up in blocks per year"""
@wraps(func)
def day_wrapper(*args, start, end, **kwargs):
blocks = day_blocks(start, end)
frames = [func(*args, start=_start, end=_end, **kwargs) for _start, _end
in blocks]
df = pd.concat(frames)
return df
return day_wrapper
class EntsoePandasClient(EntsoeRawClient):
@year_limited
def query_day_ahead_prices(self, country_code, start, end) -> pd.Series:
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
pd.Series
"""
text = super(EntsoePandasClient, self).query_day_ahead_prices(
country_code=country_code, start=start, end=end)
series = parse_prices(text)
series = series.tz_convert(TIMEZONE_MAPPINGS[country_code])
series = series.truncate(before=start, after=end)
return series
@year_limited
def query_load(self, country_code, start, end) -> pd.Series:
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
pd.Series
"""
text = super(EntsoePandasClient, self).query_load(
country_code=country_code, start=start, end=end)
series = parse_loads(text)
series = series.tz_convert(TIMEZONE_MAPPINGS[country_code])
series = series.truncate(before=start, after=end)
return series
@year_limited
def query_load_forecast(self, country_code, start, end) -> pd.Series:
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
pd.Series
"""
text = super(EntsoePandasClient, self).query_load_forecast(
country_code=country_code, start=start, end=end)
series = parse_loads(text)
series = series.tz_convert(TIMEZONE_MAPPINGS[country_code])
series = series.truncate(before=start, after=end)
return series
@year_limited
def query_generation_forecast(self, country_code, start, end) -> pd.Series:
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
pd.Series
"""
text = super(EntsoePandasClient, self).query_generation_forecast(
country_code=country_code, start=start, end=end)
series = parse_loads(text)
series = series.tz_convert(TIMEZONE_MAPPINGS[country_code])
series = series.truncate(before=start, after=end)
return series
@year_limited
def query_wind_and_solar_forecast(self, country_code, start, end, psr_type=None,
lookup_bzones=False):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter on a single psr type
lookup_bzones : bool
if True, country_code is expected to be a bidding zone
Returns
-------
pd.DataFrame
"""
text = super(EntsoePandasClient, self).query_wind_and_solar_forecast(
country_code=country_code, start=start, end=end, psr_type=psr_type,
lookup_bzones=lookup_bzones)
df = parse_generation(text)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df = df.truncate(before=start, after=end)
return df
@year_limited
def query_generation(self, country_code, start, end, psr_type=None,
lookup_bzones=False):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter on a single psr type
lookup_bzones : bool
if True, country_code is expected to be a bidding zone
Returns
-------
pd.DataFrame
"""
text = super(EntsoePandasClient, self).query_generation(
country_code=country_code, start=start, end=end, psr_type=psr_type,
lookup_bzones=lookup_bzones)
df = parse_generation(text)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df = df.truncate(before=start, after=end)
return df
@year_limited
def query_installed_generation_capacity(self, country_code, start, end,
psr_type=None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter query for a specific psr type
Returns
-------
pd.DataFrame
"""
text = super(
EntsoePandasClient, self).query_installed_generation_capacity(
country_code=country_code, start=start, end=end, psr_type=psr_type)
df = parse_generation(text)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df = df.truncate(before=start, after=end)
return df
@year_limited
def query_installed_generation_capacity_per_unit(self, country_code,
start, end, psr_type=None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter query for a specific psr type
Returns
-------
pd.DataFrame
"""
text = super(
EntsoePandasClient, self).query_installed_generation_capacity_per_unit(
country_code=country_code, start=start, end=end, psr_type=psr_type)
df = parse_installed_capacity_per_plant(text)
return df
@year_limited
def query_crossborder_flows(self, country_code_from, country_code_to, start, end, lookup_bzones=False):
"""
Note: Result will be in the timezone of the origin country
Parameters
----------
country_code_from : str
country_code_to : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
pd.Series
"""
text = super(EntsoePandasClient, self).query_crossborder_flows(
country_code_from=country_code_from,
country_code_to=country_code_to, start=start, end=end, lookup_bzones=lookup_bzones)
ts = parse_crossborder_flows(text)
ts = ts.tz_convert(TIMEZONE_MAPPINGS[country_code_from])
ts = ts.truncate(before=start, after=end)
return ts
@year_limited
def query_imbalance_prices(self, country_code, start, end, psr_type=None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter query for a specific psr type
Returns
-------
pd.DataFrame
"""
text = super(EntsoePandasClient, self).query_imbalance_prices(
country_code=country_code, start=start, end=end, psr_type=psr_type)
df = parse_imbalance_prices(text)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df = df.truncate(before=start, after=end)
return df
@year_limited
@paginated
def query_unavailability(self, country_code, start, end, doctype,
docstatus=None, periodstartupdate = None,
periodendupdate = None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
doctype : str
docstatus : str, optional
        periodstartupdate : pd.Timestamp, optional
        periodendupdate : pd.Timestamp, optional
Returns
-------
pd.DataFrame
"""
content = super(EntsoePandasClient,
self).query_unavailability(
country_code=country_code, start=start, end=end, doctype = doctype,
docstatus=docstatus, periodstartupdate = periodstartupdate,
periodendupdate = periodendupdate)
df = parse_unavailabilities(content)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df['start'] = df['start'].apply(lambda x: x.tz_convert(TIMEZONE_MAPPINGS[country_code]))
df['end'] = df['end'].apply(lambda x: x.tz_convert(TIMEZONE_MAPPINGS[country_code]))
df = df.truncate(before=start, after=end)
return df
@year_limited
@paginated
def query_unavailability_of_generation_units(self, country_code, start, end,
docstatus=None, periodstartupdate = None,
periodendupdate = None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
docstatus : str, optional
        periodstartupdate : pd.Timestamp, optional
        periodendupdate : pd.Timestamp, optional
Returns
-------
pd.DataFrame
"""
df = super(EntsoePandasClient,
self).query_unavailability_of_generation_units(
country_code=country_code, start=start, end=end,
docstatus=docstatus, periodstartupdate = periodstartupdate,
periodendupdate = periodendupdate)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df['start'] = df['start'].apply(lambda x: x.tz_convert(TIMEZONE_MAPPINGS[country_code]))
df['end'] = df['end'].apply(lambda x: x.tz_convert(TIMEZONE_MAPPINGS[country_code]))
df = df.truncate(before=start, after=end)
return df
@year_limited
@paginated
def query_unavailability_of_production_units(self, country_code, start, end,
docstatus=None, periodstartupdate = None,
periodendupdate = None):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
docstatus : str, optional
        periodstartupdate : pd.Timestamp, optional
        periodendupdate : pd.Timestamp, optional
Returns
-------
pd.DataFrame
"""
content = super(EntsoePandasClient,
self).query_unavailability_of_production_units(
country_code=country_code, start=start, end=end,
docstatus=docstatus, periodstartupdate = periodstartupdate,
periodendupdate = periodendupdate)
df = parse_unavailabilities(content)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df['start'] = df['start'].apply(lambda x: x.tz_convert(TIMEZONE_MAPPINGS[country_code]))
df['end'] = df['end'].apply(lambda x: x.tz_convert(TIMEZONE_MAPPINGS[country_code]))
df = df.truncate(before=start, after=end)
return df
def query_withdrawn_unavailability_of_generation_units(
self, country_code, start, end):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
Returns
-------
pd.DataFrame
"""
df = self.query_unavailability_of_generation_units(
country_code=country_code, start=start, end=end, docstatus='A13')
df = df.truncate(before=start, after=end)
return df
@day_limited
def query_generation_per_plant(self, country_code, start, end, psr_type=None,lookup_bzones=False):
"""
Parameters
----------
country_code : str
start : pd.Timestamp
end : pd.Timestamp
psr_type : str
filter on a single psr type
lookup_bzones : bool
if True, country_code is expected to be a bidding zone
Returns
-------
pd.DataFrame
"""
text = super(EntsoePandasClient, self).query_generation_per_plant(
country_code=country_code, start=start, end=end, psr_type=psr_type,
lookup_bzones=lookup_bzones)
df = parse_generation_per_plant(text)
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df = df.truncate(before=start, after=end)
return df
def query_import(self, country_code: str, start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
"""
Adds together all incoming cross-border flows to a country
The neighbours of a country are given by the NEIGHBOURS mapping
"""
imports = []
for neighbour in NEIGHBOURS[country_code]:
try:
im = self.query_crossborder_flows(country_code_from=neighbour, country_code_to=country_code, end=end,
start=start, lookup_bzones=True)
except NoMatchingDataError:
continue
im.name = neighbour
imports.append(im)
df = pd.concat(imports, axis=1)
        df = df.loc[:, (df != 0).any(axis=0)]  # drop columns that contain only zeros
df = df.tz_convert(TIMEZONE_MAPPINGS[country_code])
df = df.truncate(before=start, after=end)
return df
def query_generation_import(self, country_code: str, start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
"""Query the combination of both domestic generation and imports"""
generation = self.query_generation(country_code=country_code, end=end, start=start, lookup_bzones=True)
        generation = generation.loc[:, (generation != 0).any(axis=0)]  # drop columns that contain only zeros
generation = generation.resample('H').sum()
imports = self.query_import(country_code=country_code, start=start, end=end)
        data = {'Generation': generation, 'Import': imports}
df = pd.concat(data.values(), axis=1, keys=data.keys())
df = df.truncate(before=start, after=end)
return df
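if __name__ == '__main__':
    # Hedged usage sketch, not part of the original client: the API key is a
    # placeholder, and 'DE' is assumed to be a key of the BIDDING_ZONES and
    # TIMEZONE_MAPPINGS dictionaries defined earlier in this module.
    client = EntsoePandasClient(api_key='YOUR_ENTSOE_API_KEY')
    start = pd.Timestamp('2020-01-01', tz='Europe/Berlin')
    end = pd.Timestamp('2020-01-02', tz='Europe/Berlin')
    prices = client.query_day_ahead_prices('DE', start=start, end=end)
    print(prices.head())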
|
py | 1a4bfdcdadba959be207609384f87983e87b75e0 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import inspect
from pandapower.auxiliary import _check_bus_index_and_print_warning_if_high, \
_check_gen_index_and_print_warning_if_high, _init_runpp_options, _init_rundcopp_options, \
_init_rundcpp_options, _init_runopp_options, _internal_stored
from pandapower.opf.validate_opf_input import _check_necessary_opf_parameters
from pandapower.optimal_powerflow import _optimal_powerflow
from pandapower.powerflow import _powerflow, _recycled_powerflow
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def set_user_pf_options(net, overwrite=False, **kwargs):
"""
This function sets the 'user_pf_options' dict for net. These options overrule
net.__internal_options once they are added to net. These options are used in configuration of
load flow calculation.
At the same time, user-defined arguments for pandapower.runpp() always have a higher priority.
To remove user_pf_options, set overwrite=True and provide no additional arguments
:param net: pandaPower network
:param overwrite: specifies whether the user_pf_options is removed before setting new options
:param kwargs: load flow options, e. g. tolerance_mva = 1e-3
:return: None
"""
standard_parameters = ['calculate_voltage_angles', 'trafo_model', 'check_connectivity', 'mode',
'copy_constraints_to_ppc', 'switch_rx_ratio', 'enforce_q_lims',
'recycle', 'voltage_depend_loads', 'consider_line_temperature', 'delta',
'trafo3w_losses', 'init_vm_pu', 'init_va_degree', 'init_results',
'tolerance_mva', 'trafo_loading', 'numba', 'ac', 'algorithm',
'max_iteration', 'v_debug', 'run_control']
if overwrite or 'user_pf_options' not in net.keys():
net['user_pf_options'] = dict()
net.user_pf_options.update({key: val for key, val in kwargs.items()
if key in standard_parameters})
additional_kwargs = {key: val for key, val in kwargs.items()
if key not in standard_parameters}
# this part is to inform user and to make typos in parameters visible
if len(additional_kwargs) > 0:
logger.info('parameters %s are not in the list of standard options' % list(
additional_kwargs.keys()))
net.user_pf_options.update(additional_kwargs)
def runpp(net, algorithm='nr', calculate_voltage_angles="auto", init="auto",
max_iteration="auto", tolerance_mva=1e-8, trafo_model="t",
trafo_loading="current", enforce_q_lims=False, check_connectivity=True,
voltage_depend_loads=True, consider_line_temperature=False,
run_control=False, **kwargs):
"""
Runs a power flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**algorithm** (str, "nr") - algorithm that is used to solve the power flow problem.
The following algorithms are available:
- "nr" Newton-Raphson (pypower implementation with numba accelerations)
- "iwamoto_nr" Newton-Raphson with Iwamoto multiplier (maybe slower than NR but more robust)
- "bfsw" backward/forward sweep (specially suited for radial and weakly-meshed networks)
- "gs" gauss-seidel (pypower implementation)
- "fdbx" fast-decoupled (pypower implementation)
- "fdxb" fast-decoupled (pypower implementation)
**calculate_voltage_angles** (str or bool, "auto") - consider voltage angles in loadflow calculation
If True, voltage angles of ext_grids and transformer shifts are considered in the
loadflow calculation. Considering the voltage angles is only necessary in meshed
networks that are usually found in higher voltage levels. calculate_voltage_angles
in "auto" mode defaults to:
- True, if the network voltage level is above 70 kV
- False otherwise
The network voltage level is defined as the maximum rated voltage of any bus in the network that
is connected to a line.
**init** (str, "auto") - initialization method of the loadflow
pandapower supports four methods for initializing the loadflow:
- "auto" - init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
- "flat"- flat start with voltage of 1.0pu and angle of 0° at all PQ-buses and 0° for PV buses as initial solution, the slack bus is initialized with the values provided in net["ext_grid"]
- "dc" - initial DC loadflow before the AC loadflow. The results of the DC loadflow are used as initial solution for the AC loadflow. Note that the DC loadflow only calculates voltage angles at PQ and PV buses, voltage magnitudes are still flat started.
- "results" - voltage vector of last loadflow from net.res_bus is used as initial solution. This can be useful to accelerate convergence in iterative loadflows like time series calculations.
Considering the voltage angles might lead to non-convergence of the power flow in flat start.
That is why in "auto" mode, init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
**max_iteration** (int, "auto") - maximum number of iterations carried out in the power flow algorithm.
In "auto" mode, the default value depends on the power flow solver:
- 10 for "nr"
- 100 for "bfsw"
- 1000 for "gs"
- 30 for "fdbx"
- 30 for "fdxb"
**tolerance_mva** (float, 1e-8) - loadflow termination condition referring to P / Q mismatch of node power in MVA
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model.
- "pi" - transformer is modeled as equivalent PI-model. This is not recommended, since it is less exact than the T-model. It is only recommended for valdiation with other software that uses the pi-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**enforce_q_lims** (bool, False) - respect generator reactive power limits
If True, the reactive power limits in net.gen.max_q_mvar/min_q_mvar are respected in the
loadflow. This is done by running a second loadflow if reactive power limits are
violated at any generator, so that the runtime for the loadflow will increase if reactive
power has to be curtailed.
Note: enforce_q_lims only works if algorithm="nr"!
**check_connectivity** (bool, True) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
        If True, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is performed.
If check finds unsupplied buses, they are set out of service in the ppc
**voltage_depend_loads** (bool, True) - consideration of voltage-dependent loads. If False, net.load.const_z_percent and net.load.const_i_percent are not considered, i.e. net.load.p_mw and net.load.q_mvar are considered as constant-power loads.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided
line temperature. If True, net.line must contain a column "temperature_degree_celsius".
The temperature dependency coefficient alpha must be provided in the net.line.alpha
column, otherwise the default value of 0.004 is used
**KWARGS:
**numba** (bool, True) - Activation of numba JIT compiler in the newton solver
If set to True, the numba JIT compiler is used to generate matrices for the powerflow,
which leads to significant speed improvements.
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**delta_q** - Reactive power tolerance for option "enforce_q_lims" in kvar - helps convergence in some cases.
**trafo3w_losses** - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**v_debug** (bool, False) - if True, voltage values in each newton-raphson iteration are logged in the ppc
**init_vm_pu** (string/float/array/Series, None) - Allows to define initialization specifically for voltage magnitudes. Only works with init == "auto"!
- "auto": all buses are initialized with the mean value of all voltage controlled elements in the grid
- "flat" for flat start from 1.0
- "results": voltage magnitude vector is taken from result table
- a float with which all voltage magnitudes are initialized
            - an iterable with a voltage magnitude value for each bus (length and order have to match with the buses in net.bus)
- a pandas Series with a voltage magnitude value for each bus (indexes have to match the indexes in net.bus)
**init_va_degree** (string/float/array/Series, None) - Allows to define initialization specifically for voltage angles. Only works with init == "auto"!
- "auto": voltage angles are initialized from DC power flow if angles are calculated or as 0 otherwise
- "dc": voltage angles are initialized from DC power flow
- "flat" for flat start from 0
- "results": voltage angle vector is taken from result table
- a float with which all voltage angles are initialized
            - an iterable with a voltage angle value for each bus (length and order have to match with the buses in net.bus)
- a pandas Series with a voltage angle value for each bus (indexes have to match the indexes in net.bus)
**recycle** (dict, none) - Reuse of internal powerflow variables for time series calculation
Contains a dict with the following parameters:
bus_pq: If True PQ values of buses are updated
trafo: If True trafo relevant variables, e.g., the Ybus matrix, is recalculated
gen: If True Sbus and the gen table in the ppc are recalculated
**neglect_open_switch_branches** (bool, False) - If True no auxiliary buses are created for branches when switches are opened at the branch. Instead branches are set out of service
"""
# if dict 'user_pf_options' is present in net, these options overrule the net.__internal_options
# except for parameters that are passed by user
recycle = kwargs.get("recycle", None)
if isinstance(recycle, dict) and _internal_stored(net):
_recycled_powerflow(net, **kwargs)
return
if run_control and net.controller.in_service.any():
from pandapower.control import run_control
parameters = {**locals(), **kwargs}
# disable run control for inner loop to avoid infinite loop
parameters["run_control"] = False
run_control(**parameters)
else:
passed_parameters = _passed_runpp_parameters(locals())
_init_runpp_options(net, algorithm=algorithm, calculate_voltage_angles=calculate_voltage_angles,
init=init, max_iteration=max_iteration, tolerance_mva=tolerance_mva,
trafo_model=trafo_model, trafo_loading=trafo_loading,
enforce_q_lims=enforce_q_lims, check_connectivity=check_connectivity,
voltage_depend_loads=voltage_depend_loads,
consider_line_temperature=consider_line_temperature,
passed_parameters=passed_parameters, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
def rundcpp(net, trafo_model="t", trafo_loading="current", recycle=None, check_connectivity=True,
switch_rx_ratio=2, trafo3w_losses="hv", **kwargs):
"""
Runs PANDAPOWER DC Flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model. This is consistent with PowerFactory and is also more accurate than the PI-model. We recommend using this transformer model.
- "pi" - transformer is modeled as equivalent PI-model. This is consistent with Sincal, but the method is questionable since the transformer is physically T-shaped. We therefore recommend the use of the T-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**check_connectivity** (bool, False) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
        If True, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is performed.
If check finds unsupplied buses, they are put out of service in the PYPOWER matrix
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
****kwargs** - options to use for PYPOWER.runpf
"""
_init_rundcpp_options(net, trafo_model=trafo_model, trafo_loading=trafo_loading,
recycle=recycle, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
def runopp(net, verbose=False, calculate_voltage_angles=True, check_connectivity=True,
suppress_warnings=True, switch_rx_ratio=2, delta=1e-10, init="flat", numba=True,
trafo3w_losses="hv", consider_line_temperature=False, **kwargs):
"""
Runs the pandapower Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
Flexibilities can be defined in net.sgen / net.gen /net.load / net.storage /net.ext_grid
    net.sgen.controllable signals if a static generator is controllable. If False,
the active and reactive power are assigned as in a normal power flow. If True, the following
flexibilities apply:
- net.gen.min_p_mw / net.gen.max_p_mw
- net.gen.min_q_mvar / net.gen.max_q_mvar
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.sgen.min_q_mvar / net.sgen.max_q_mvar
- net.dcline.max_p_mw
- net.dcline.min_q_to_mvar / net.dcline.max_q_to_mvar / net.dcline.min_q_from_mvar / net.dcline.max_q_from_mvar
- net.ext_grid.min_p_mw / net.ext_grid.max_p_mw
- net.ext_grid.min_q_mvar / net.ext_grid.max_q_mvar
- net.load.min_p_mw / net.load.max_p_mw
- net.load.min_q_mvar / net.load.max_q_mvar
- net.storage.min_p_mw / net.storage.max_p_mw
- net.storage.min_q_mvar / net.storage.max_q_mvar
Controllable loads behave just like controllable static generators. It must be stated if they are controllable.
Otherwise, they are not respected as flexibilities.
Dc lines are controllable per default
    Network constraints can be defined for buses, lines and transformers in the following columns of the element tables:
- net.bus.min_vm_pu / net.bus.max_vm_pu
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
    If the external grid is controllable, the voltage setpoint of the external grid can be optimized within the
voltage constraints by the OPF. The same applies to the voltage setpoints of the controllable generator elements.
How these costs are combined into a cost function depends on the cost_function parameter.
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option, however keep in mind all other pypower
warnings are suppressed, too.
**init** (str, "flat") - init of starting opf vector. Options are "flat" or "pf"
Starting solution vector (x0) for opf calculations is determined by this flag. Options are:
"flat" (default): starting vector is (upper bound - lower bound) / 2
"pf": a power flow is executed prior to the opf and the pf solution is the starting vector. This may improve
        convergence, but takes a longer runtime (which is probably negligible for opf calculations)
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided\
line temperature. If True, net.line must contain a column "temperature_degree_celsius".\
The temperature dependency coefficient alpha must be provided in the net.line.alpha\
column, otherwise the default value of 0.004 is used
**kwargs** - Pypower / Matpower keyword arguments:
- OPF_VIOLATION (5e-6) constraint violation tolerance
- PDIPM_COSTTOL (1e-6) optimality tolerance
- PDIPM_GRADTOL (1e-6) gradient tolerance
- PDIPM_COMPTOL (1e-6) complementarity condition (inequality) tolerance
            - PDIPM_FEASTOL (set to OPF_VIOLATION if not specified) feasibility (equality) tolerance
- PDIPM_MAX_IT (150) maximum number of iterations
- SCPDIPM_RED_IT(20) maximum number of step size reductions per iteration
"""
_check_necessary_opf_parameters(net, logger)
_init_runopp_options(net, calculate_voltage_angles=calculate_voltage_angles,
check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta, init=init, numba=numba,
trafo3w_losses=trafo3w_losses,
consider_line_temperature=consider_line_temperature, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
def rundcopp(net, verbose=False, check_connectivity=True, suppress_warnings=True,
switch_rx_ratio=0.5, delta=1e-10, trafo3w_losses="hv", **kwargs):
"""
Runs the pandapower Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
Flexibilities for generators can be defined in net.sgen / net.gen.
net.sgen.controllable / net.gen.controllable signals if a generator is controllable. If False,
    the active and reactive power are assigned as in a normal power flow. If True, the following
flexibilities apply:
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.gen.min_p_mw / net.gen.max_p_mw
- net.load.min_p_mw / net.load.max_p_mw
    Network constraints can be defined for buses, lines and transformers in the following columns of the element tables:
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option, however keep in mind all other pypower
warnings are suppressed, too.
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
"""
if (not net.sgen.empty) & ("controllable" not in net.sgen.columns):
logger.warning('Warning: Please specify sgen["controllable"]\n')
if (not net.load.empty) & ("controllable" not in net.load.columns):
logger.warning('Warning: Please specify load["controllable"]\n')
_init_rundcopp_options(net, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta,
trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
def _passed_runpp_parameters(local_parameters):
"""
Internal function to distinguish arguments for pandapower.runpp() that are explicitly passed by
the user.
:param local_parameters: locals() in the runpp() function
:return: dictionary of explicitly passed parameters
"""
net = local_parameters.pop("net")
if not ("user_pf_options" in net.keys() and len(net.user_pf_options) > 0):
return None
try:
default_parameters = {k: v.default for k, v in inspect.signature(runpp).parameters.items()}
except:
args, varargs, keywords, defaults = inspect.getfullargspec(runpp)
default_parameters = dict(zip(args[-len(defaults):], defaults))
default_parameters.update({"init": "auto"})
passed_parameters = {
key: val for key, val in local_parameters.items()
if key in default_parameters.keys() and val != default_parameters.get(key, None)}
return passed_parameters
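if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: runs a power flow on
    # one of the small example grids shipped with pandapower. The availability of
    # pandapower.networks.example_simple() is assumed from a standard install.
    import pandapower.networks as pn
    example_net = pn.example_simple()
    runpp(example_net, algorithm="nr", init="auto", calculate_voltage_angles="auto")
    print(example_net.res_bus[["vm_pu", "va_degree"]].head())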
|
py | 1a4bfe1b1a4a1f0514578889693c6a805c8bccce | """
Given an integer array with all positive numbers and no duplicates, find the
number of possible combinations that add up to a positive integer target.
Example:
nums = [1, 2, 3]
target = 4
The possible combination ways are:
(1, 1, 1, 1)
(1, 1, 2)
(1, 2, 1)
(1, 3)
(2, 1, 1)
(2, 2)
(3, 1)
Note that different sequences are counted as different combinations.
Therefore the output is 7.
Follow up:
What if negative numbers are allowed in the given array?
How does it change the problem?
What limitation we need to add to the question to allow negative numbers?
Credits:
Special thanks to @pbrother for adding this problem and creating all test
cases.
"""
class Solution(object):
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if nums is None or nums == []:
return 0
nums.sort()
dp = [0] * (target + 1)
dp[0] = 1
for i in range(target + 1):
for j in nums:
if i + j <= target:
dp[i + j] += dp[i]
else:
break
return dp[-1]
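# Worked trace (illustrative, not part of the original solution) for the example
# in the docstring, nums = [1, 2, 3] and target = 4: dp starts as [1, 0, 0, 0, 0];
# after processing i = 0..4 with dp[i + j] += dp[i] it becomes [1, 1, 2, 4, 7],
# so dp[-1] == 7, matching the seven ordered combinations listed above.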
a = Solution()
print(a.combinationSum4([1,2,3],4))
|
py | 1a4bfe86c76a900ab874df9d14a51779d6e435c1 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow Probability experimental NUTS package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.experimental.mcmc.elliptical_slice_sampler import EllipticalSliceSampler
from tensorflow_probability.python.experimental.mcmc.nuts import NoUTurnSampler
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import sample_sequential_monte_carlo
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'EllipticalSliceSampler',
'NoUTurnSampler',
'sample_sequential_monte_carlo',
]
remove_undocumented(__name__, _allowed_symbols)
|
py | 1a4bfebbeb64cc9520ef2608b9d270f7260e568b | import numpy
from chainer.functions.connection import bilinear
from chainer import link
class Bilinear(link.Link):
"""Bilinear layer that performs tensor multiplication.
Bilinear is a primitive link that wraps the
:func:`~chainer.functions.bilinear` functions. It holds parameters ``W``,
``V1``, ``V2``, and ``b`` corresponding to the arguments of
:func:`~chainer.functions.bilinear`.
Args:
left_size (int): Dimension of input vector :math:`e^1` (:math:`J`)
right_size (int): Dimension of input vector :math:`e^2` (:math:`K`)
out_size (int): Dimension of output vector :math:`y` (:math:`L`)
nobias (bool): If ``True``, parameters ``V1``, ``V2``, and ``b`` are
omitted.
initialW (3-D numpy array): Initial value of :math:`W`.
Shape of this argument must be
``(left_size, right_size, out_size)``. If ``None``,
:math:`W` is initialized by centered Gaussian distribution properly
scaled according to the dimension of inputs and outputs.
initial_bias (tuple): Initial values of :math:`V^1`, :math:`V^2`
            and :math:`b`. The length of this argument must be 3.
Each element of this tuple must have the shapes of
``(left_size, output_size)``, ``(right_size, output_size)``,
and ``(output_size,)``, respectively. If ``None``, :math:`V^1`
            and :math:`V^2` are initialized by scaled centered Gaussian
distributions and :math:`b` is set to :math:`0`.
.. seealso:: See :func:`chainer.functions.bilinear` for details.
Attributes:
W (~chainer.Variable): Bilinear weight parameter.
V1 (~chainer.Variable): Linear weight parameter for the first argument.
V2 (~chainer.Variable): Linear weight parameter for the second
argument.
b (~chainer.Variable): Bias parameter.
"""
def __init__(self, left_size, right_size, out_size, nobias=False,
initialW=None, initial_bias=None):
super(Bilinear, self).__init__(W=(left_size, right_size, out_size))
self.in_sizes = (left_size, right_size)
self.nobias = nobias
if initialW is not None:
assert initialW.shape == self.W.data.shape
self.W.data[...] = initialW
else:
# TODO(Kenta OONO): I do not know appropriate way of
# initializing weights in tensor network.
# This initialization is a modification of
# that of Linear function.
in_size = left_size * right_size * out_size
self.W.data[...] = numpy.random.normal(
0, numpy.sqrt(1. / in_size), self.W.data.shape)
if not self.nobias:
self.add_param('V1', (left_size, out_size))
self.add_param('V2', (right_size, out_size))
self.add_param('b', out_size)
if initial_bias is not None:
V1, V2, b = initial_bias
assert V1.shape == self.V1.data.shape
assert V2.shape == self.V2.data.shape
assert b.shape == self.b.data.shape
self.V1.data[...] = V1
self.V2.data[...] = V2
self.b.data[...] = b
else:
self.V1.data[...] = numpy.random.normal(
0, numpy.sqrt(1. / left_size), (left_size, out_size))
self.V2.data[...] = numpy.random.normal(
0, numpy.sqrt(1. / right_size), (right_size, out_size))
self.b.data.fill(0)
def __call__(self, e1, e2):
"""Applies the bilinear function to inputs and the internal parameters.
Args:
e1 (~chainer.Variable): Left input.
e2 (~chainer.Variable): Right input.
Returns:
~chainer.Variable: Output variable.
"""
if self.nobias:
return bilinear.bilinear(e1, e2, self.W)
else:
return bilinear.bilinear(e1, e2, self.W, self.V1, self.V2, self.b)
def zero_grads(self):
# Left for backward compatibility
self.zerograds()
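if __name__ == '__main__':
    # Hedged usage sketch, not part of the original link: feeds random numpy
    # arrays through the layer just to show the expected shapes; the batch size
    # of 5 and the dimensions below are arbitrary placeholders.
    layer = Bilinear(left_size=3, right_size=4, out_size=2)
    e1 = numpy.random.rand(5, 3).astype(numpy.float32)
    e2 = numpy.random.rand(5, 4).astype(numpy.float32)
    y = layer(e1, e2)
    print(y.data.shape)  # expected: (5, 2)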
|
py | 1a4bfec21a963a3b3496a9ee4e2180cd93a6734f | import sublime
import sublime_plugin
import subprocess
from .path_utils import path_for_view
SCRIPT_PATH = 'Packages/SublimeConfig/src/commands/open_current_directory_in_terminal.applescript'
def osascript(
*,
script,
args=[]
):
cmd = ['osascript', '-'] + args
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return proc.communicate(input=script)
class OpenCurrentDirectoryInTerminalCommand(sublime_plugin.TextCommand):
def run(self, edit):
directory, filename = path_for_view(self.view)
script = sublime.load_binary_resource(SCRIPT_PATH)
osascript(script=script, args=[directory])
|
py | 1a4c0009f1149f2b38828ce1a24dbb3dc39b312e | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2013 [email protected] <[email protected]>
# Copyright (c) 2014-2017 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2017-2018 Bryce Guinta <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""utilities methods and classes for checkers
Base id of standard checkers (used in msg and report ids):
01: base
02: classes
03: format
04: import
05: misc
06: variables
07: exceptions
08: similar
09: design_analysis
10: newstyle
11: typecheck
12: logging
13: string_format
14: string_constant
15: stdlib
16: python3
17: refactoring
18-50: not yet used: reserved for future internal checkers.
51-99: perhaps used: reserved for external checkers
The raw_metrics checker has no number associated since it doesn't emit any
messages nor reports. XXX not true, emit a 07 report !
"""
import sys
import tokenize
import warnings
from typing import Any
from pylint.config import OptionsProviderMixIn
from pylint.reporters import diff_string
from pylint.utils import register_plugins
from pylint.interfaces import UNDEFINED
def table_lines_from_stats(stats, old_stats, columns):
"""get values listed in <columns> from <stats> and <old_stats>,
    and return a formatted list of values, designed to be given to a
ureport.Table object
"""
lines = []
for m_type in columns:
new = stats[m_type]
format = str # pylint: disable=redefined-builtin
if isinstance(new, float):
format = lambda num: "%.3f" % num
old = old_stats.get(m_type)
if old is not None:
diff_str = diff_string(old, new)
old = format(old)
else:
old, diff_str = "NC", "NC"
lines += (m_type.replace("_", " "), format(new), old, diff_str)
return lines
class BaseChecker(OptionsProviderMixIn):
"""base class for checkers"""
# checker name (you may reuse an existing one)
name = None # type: str
# options level (0 will be displaying in --help, 1 in --long-help)
level = 1
    # ordered list of options to control the checker behaviour
options = () # type: Any
# messages issued by this checker
msgs = {} # type: Any
# reports issued by this checker
reports = () # type: Any
# mark this checker as enabled or not.
enabled = True
def __init__(self, linter=None):
"""checker instances should have the linter as argument
linter is an object implementing ILinter
"""
if self.name is not None:
self.name = self.name.lower()
OptionsProviderMixIn.__init__(self)
self.linter = linter
def add_message(
self,
msg_id,
line=None,
node=None,
args=None,
confidence=UNDEFINED,
col_offset=None,
):
"""add a message of a given type"""
self.linter.add_message(msg_id, line, node, args, confidence, col_offset)
# dummy methods implementing the IChecker interface
def open(self):
"""called before visiting project (i.e set of modules)"""
def close(self):
"""called after visiting project (i.e set of modules)"""
class BaseTokenChecker(BaseChecker):
"""Base class for checkers that want to have access to the token stream."""
def process_tokens(self, tokens):
"""Should be overridden by subclasses."""
raise NotImplementedError()
def initialize(linter):
"""initialize linter with checkers in this package """
register_plugins(linter, __path__[0])
__all__ = ("BaseChecker", "BaseTokenChecker", "initialize")
|
py | 1a4c00737583ab17c02a39b368db1c7eb88fba38 | """Splash_screen module."""
from PyQt6 import QtGui, QtCore, QtWidgets # type: ignore
from pineboolib.core.utils.utils_base import filedir
from pineboolib.core import settings
class SplashScreen(object):
"""Show a splashscreen to inform keep the user busy while Pineboo is warming up."""
_splash: "QtWidgets.QSplashScreen"
def __init__(self):
"""Inicialize."""
splash_path = filedir(
"./core/images/splashscreen/%s240.png"
% ("dbadmin" if settings.CONFIG.value("application/dbadmin_enabled") else "quick")
)
splash_pix = QtGui.QPixmap(splash_path)
self._splash = QtWidgets.QSplashScreen(
splash_pix, QtCore.Qt.WindowType.WindowStaysOnTopHint
)
self._splash.setMask(splash_pix.mask())
frame_geo = self._splash.frameGeometry()
primary_screen = QtGui.QGuiApplication.primaryScreen()
frame_geo.moveCenter(primary_screen.geometry().center())
self._splash.move(frame_geo.topLeft())
def showMessage(self, text: str) -> None:
"""Show a message into spalsh screen."""
self._splash.showMessage(
text, QtCore.Qt.AlignmentFlag.AlignLeft, QtCore.Qt.GlobalColor.white
)
def hide(self) -> None:
"""Hide splash screen."""
QtCore.QTimer.singleShot(1000, self._splash.hide)
def show(self) -> None:
"""Show splash screen."""
self._splash.show()
|
py | 1a4c012ed861463d60ff7351e540af04472335c7 | import torch
import torch.nn as nn
import torch.nn.functional as F
import src.data.data as data
import src.data.config as cfg
import src.models.utils as model_utils
import src.evaluate.utils as eval_utils
import src.train.batch as batch_utils
def make_sampler(sampler_type, opt, *args, **kwargs):
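    # Note: sampler_type is currently ignored here; this helper always builds the
    # greedy sampler, while TopKSampler and BeamSampler defined below have to be
    # instantiated directly.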
print("Initializing Greedy Sampler")
return GreedySampler(opt, *args, **kwargs)
class Sampler():
def __init__(self, opt, data_loader, batch_mode=False):
# Token on which to end sampling
self.end_token = data_loader.vocab_encoder[data.end_token]
self.opt = opt
    def generate_sequence(self, batch, model):
        raise NotImplementedError
class GreedySampler(Sampler):
def __init__(self, opt, data_loader, batch_mode=True):
super(GreedySampler, self).__init__(opt, data_loader)
def append_batch(self, X, next_idx, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((next_idx, next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
values, indices = lm_probs[:, -1, :].max(dim=-1)
seqs = indices.clone().unsqueeze(1)
loss = values
counts = 1
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((indices.view(-1, 1), next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
        # Greedy decoding loop: extend the sequence one token at a time
for _ in range(self.opt.eval.smax):
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
            # Greedy step: take the argmax token from the distribution
values, next_idx = lm_probs[:, -1, :].max(dim=-1)
loss += values
counts += 1
next_idx = next_idx.unsqueeze(1)
seqs = torch.cat([seqs, next_idx], 1)
if (next_idx.item() == self.end_token) or (_ == end_len - 1):
break
XMB, MMB = self.append_batch(XMB, next_idx, MMB)
beams = []
for beam in seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": [loss.item()],
"loss": loss.item(),
"beam_lengths": [counts],
"length": counts
}
return sampling_result
class TopKSampler(Sampler):
def __init__(self, opt, data_loader, batch_mode=True):
super(TopKSampler, self).__init__(opt, data_loader)
def append_batch(self, X, next_idx, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((next_idx, next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
# start_idx = context_size_event + 1
# start_idx = max_e1 + max_r
# end_idx = context_size_effect - 1
# end_idx = max_e2
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
seqs = indices.t().clone()
losses = - values.view(-1, 1)
ended = (seqs == self.end_token).float()
counts = (1 - ended)
XMB = XMB.repeat(self.opt.eval.k, 1, 1)
MMB = MMB.repeat(self.opt.eval.k, 1)
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((indices.view(self.opt.eval.k, -1), next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
# Sample from top k
for _ in range(end_len):
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
# Sample from top k
values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
choice = torch.multinomial(values.exp(), 1)
next_idx = indices.gather(-1, choice)
ended = ended + (next_idx == self.end_token).float() * (1 - ended)
next_idx = next_idx * (1 - ended).long() + ended.long() * self.end_token
counts += (1 - ended)
seqs = torch.cat([seqs, next_idx], 1)
if ended.sum().item() == self.opt.eval.k:
break
losses -= values.gather(-1, choice) * (1 - ended)
XMB, MMB = self.append_batch(XMB, next_idx, MMB)
beams = []
for beam in seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": losses.squeeze().tolist(),
"loss": losses[0].item(),
"beam_lengths": counts.long().squeeze().tolist(),
"length": counts[0].long().item()
}
return sampling_result
class BeamSampler(TopKSampler):
def __init__(self, opt, data_loader, batch_mode=True, scorer=None):
super(BeamSampler, self).__init__(opt, data_loader, batch_mode)
self.kill_mask = torch.ones(opt.eval.bs, opt.eval.bs).to(cfg.device) * 9000
self.kill_mask[:, 0] = 0
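        # For beams that have already emitted the end token, kill_mask scales the
        # log-probability of every candidate expansion by 9000 (making those
        # hypotheses prohibitively unlikely) except the first column, which is
        # zeroed so the finished beam is carried forward at no extra cost.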
def make_batch(self, X):
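        # Note: n_vocab, n_special and device are assumed to be module-level
        # globals from the original project; they are not defined in this file.
        # np is numpy (imported at the top of the module).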
X = np.array(X)
assert X.ndim in [1, 2]
if X.ndim == 1:
X = np.expand_dims(X, axis=0)
pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
pos_enc = np.expand_dims(pos_enc, axis=0)
batch = np.stack([X, pos_enc], axis=-1)
batch = torch.tensor(batch, dtype=torch.long).to(device)
return batch
def append_batch(self, X, beam_toks, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((beam_toks.unsqueeze(1), next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
# start_idx = context_size_event + 1
# start_idx = max_e1 + max_r
# end_idx = context_size_effect - 1
# end_idx = max_e2
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
tokens = []
beam_losses = []
# Beam Search
beam_lls, beam_toks, beam_seqs = None, None, None
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
dist = lm_probs[:, -1, :].squeeze()
beam_lls, beam_toks = dist.topk(self.opt.eval.bs)
beam_losses.append(beam_lls)
ended = (beam_toks == self.end_token).float()
counts = (2 - ended)
beam_toks = beam_toks.unsqueeze(1)
beam_seqs = beam_toks.clone()
XMB = XMB.repeat(self.opt.eval.bs, 1, 1)
MMB = MMB.repeat(self.opt.eval.bs, 1)
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((beam_toks, next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
for _ in range(end_len):
# print(XMB.shape)
# Compute distribution for current beam
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
dist = lm_probs[:, -1, :].squeeze()
# get hypothesis tokens for distribution
hyp_beam_lls, hyp_beam_toks = dist.topk(self.opt.eval.bs)
# Compute masks and expand beam
expanded_ended = ended.unsqueeze(1).repeat(1, self.opt.eval.bs)
hypothesis_mask = expanded_ended * self.kill_mask + (1 - expanded_ended)
paper_results = False
if paper_results:
# Results from paper with slightly buggy beam search
current_beam_lls = beam_lls.unsqueeze(1).repeat(
1, self.opt.eval.bs).view(self.opt.eval.bs ** 2)
else:
# Current beam search implementation
current_beam_lls = beam_losses[-1].unsqueeze(1).repeat(
1, self.opt.eval.bs).view(self.opt.eval.bs ** 2)
# Compute losses of hypotheses, masking those that have ended
hyp_beam_lls = (hyp_beam_lls.view(self.opt.eval.bs ** 2) *
hypothesis_mask.view(-1)) + current_beam_lls
# Get normalizer for sequences
temp_counts = counts.unsqueeze(1).repeat(1, self.opt.eval.bs).view(
self.opt.eval.bs ** 2)
# Select best beams with lowest aggregate loss
beam_lls, top_beam_idxs = (hyp_beam_lls / temp_counts).topk(self.opt.eval.bs)
            # Update placements in beam based on selection
beam_losses = [i.index_select(0, top_beam_idxs // self.opt.eval.bs)
for i in beam_losses]
ended = ended.index_select(0, top_beam_idxs // self.opt.eval.bs)
counts = temp_counts.index_select(0, top_beam_idxs)
# Save beam losses
beam_losses.append(beam_lls * counts)
# Update beam tokens
ended_mask = (1 - ended).long()
end_replacement = (self.end_token * ended).long()
next_toks = hyp_beam_toks.view(-1)[top_beam_idxs]
beam_toks = next_toks * ended_mask + end_replacement
# Update ended and counts
ended = ended + (beam_toks == self.end_token).float() * (1 - ended)
counts = counts + (1 - ended)
# Update beam sequences
beam_seqs = beam_seqs.t().repeat(self.opt.eval.bs, 1).t().contiguous().view(
self.opt.eval.bs ** 2, -1)[top_beam_idxs]
beam_seqs = torch.cat((beam_seqs, beam_toks.unsqueeze(1)), dim=1)
            # Expand the context XMB across the beam dimension and reorder it
            # so each selected hypothesis keeps the inputs of its parent beam.
XMB = XMB.transpose(0, 1).transpose(1, 2).repeat(
self.opt.eval.bs, 1, 1).transpose(2, 1).transpose(
1, 0).contiguous().view(
self.opt.eval.bs ** 2, XMB.size(1), XMB.size(2))[top_beam_idxs]
XMB, MMB = self.append_batch(XMB, beam_toks, MMB)
if (beam_toks == self.end_token).sum().item() == self.opt.eval.bs:
break
beams = []
for beam in beam_seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": beam_lls.tolist(),
"loss": beam_lls[0].item(),
"beam_lengths": counts.tolist(),
"length": counts[0].item()
}
return sampling_result
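# --- Hedged illustration (not part of the original module) ---
# A minimal, self-contained sketch of the core top-k sampling step used in
# TopKSampler.generate_sequence above, run on a single fake log-probability
# row. The vocabulary size (10) and k (3) are made-up values for the demo.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F
    fake_logits = torch.randn(1, 10)              # one decoding step, toy vocab
    lm_probs = F.log_softmax(fake_logits, dim=-1)
    values, indices = lm_probs.topk(3)            # keep the 3 most likely tokens
    choice = torch.multinomial(values.exp(), 1)   # sample among the top k
    next_idx = indices.gather(-1, choice)         # map back to vocabulary ids
    print("sampled token id:", next_idx.item())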
|
py | 1a4c045c48a9a20a440c65c21b2738327cb9359f | #!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import WORC.addexceptions as WORCexceptions
import fastr
from fastr.api import ResourceLimit
import os
import graphviz
class Evaluate(object):
"""Build a network that evaluates the performance of an estimator."""
def __init__(self, label_type, modus='binary_classification', ensemble=50,
scores='percentages',
parent=None, features=None,
fastr_plugin='LinearExecution',
name='Example'):
"""
Initialize object.
Parameters
----------
network: fastr network, default None
If you input a network, the evaluate network is added
to the existing network.
"""
if parent is not None:
self.parent = parent
self.network = parent.network
self.mode = 'WORC'
self.name = parent.network.id
self.ensemble = parent.configs[0]['Ensemble']['Use']
else:
self.mode = 'StandAlone'
self.fastr_plugin = fastr_plugin
self.name = 'WORC_Evaluate_' + name
self.network = fastr.create_network(id=self.name)
self.fastr_tmpdir = os.path.join(fastr.config.mounts['tmp'], self.name)
self.ensemble = ensemble
if features is None and self.mode == 'StandAlone':
raise WORCexceptions.WORCIOError('Either features as input or a WORC network is required for the Evaluate network.')
self.modus = modus
self.features = features
self.label_type = label_type
self.create_network()
def create_network(self):
"""Add evaluate components to network."""
# Create all nodes
if self.modus == 'binary_classification':
self.node_ROC =\
self.network.create_node('worc/PlotROC:1.0', tool_version='1.0',
id='plot_ROC',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
if self.mode == 'StandAlone':
self.node_Estimator =\
self.network.create_node('worc/PlotEstimator:1.0', tool_version='1.0',
id='plot_Estimator',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_Barchart =\
self.network.create_node('worc/PlotBarchart:1.0',
tool_version='1.0', id='plot_Barchart',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_Hyperparameters =\
self.network.create_node('worc/PlotHyperparameters:1.0',
tool_version='1.0', id='plot_Hyperparameters',
resources=ResourceLimit(memory='6G'),
step_id='Evaluation')
if 'classification' in self.modus:
self.node_STest =\
self.network.create_node('worc/StatisticalTestFeatures:1.0',
tool_version='1.0',
id='statistical_test_features',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_decomposition =\
self.network.create_node('worc/Decomposition:1.0',
tool_version='1.0',
id='decomposition',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_Ranked_Percentages =\
self.network.create_node('worc/PlotRankedScores:1.0',
tool_version='1.0',
id='plot_ranked_percentages',
resources=ResourceLimit(memory='20G'),
step_id='Evaluation')
self.node_Ranked_Posteriors =\
self.network.create_node('worc/PlotRankedScores:1.0',
tool_version='1.0',
id='plot_ranked_posteriors',
resources=ResourceLimit(memory='20G'),
step_id='Evaluation')
self.node_Boxplots_Features =\
self.network.create_node('worc/PlotBoxplotFeatures:1.0',
tool_version='1.0',
id='plot_boxplot_features',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
# Create sinks
if self.modus == 'binary_classification':
self.sink_ROC_PNG =\
self.network.create_sink('PNGFile', id='ROC_PNG',
step_id='general_sinks')
self.sink_ROC_Tex =\
self.network.create_sink('TexFile', id='ROC_Tex',
step_id='general_sinks')
self.sink_ROC_CSV =\
self.network.create_sink('CSVFile', id='ROC_CSV',
step_id='general_sinks')
self.sink_PRC_PNG =\
self.network.create_sink('PNGFile', id='PRC_PNG',
step_id='general_sinks')
self.sink_PRC_Tex =\
self.network.create_sink('TexFile', id='PRC_Tex',
step_id='general_sinks')
self.sink_PRC_CSV =\
self.network.create_sink('CSVFile', id='PRC_CSV',
step_id='general_sinks')
if self.mode == 'StandAlone':
self.sink_Estimator_Json =\
self.network.create_sink('JsonFile', id='Estimator_Json',
step_id='general_sinks')
self.sink_Barchart_PNG =\
self.network.create_sink('PNGFile', id='Barchart_PNG',
step_id='general_sinks')
self.sink_Barchart_Tex =\
self.network.create_sink('TexFile',
id='Barchart_Tex',
step_id='general_sinks')
self.sink_Hyperparameters_CSV =\
self.network.create_sink('CSVFile', id='Hyperparameters_CSV',
step_id='general_sinks')
if 'classification' in self.modus:
self.sink_STest_CSV =\
self.network.create_sink('CSVFile',
id='StatisticalTestFeatures_CSV',
step_id='general_sinks')
self.sink_STest_PNG =\
self.network.create_sink('PNGFile',
id='StatisticalTestFeatures_PNG',
step_id='general_sinks')
self.sink_STest_Tex =\
self.network.create_sink('TexFile',
id='StatisticalTestFeatures_Tex',
step_id='general_sinks')
self.sink_decomposition_PNG =\
self.network.create_sink('PNGFile', id='Decomposition_PNG',
step_id='general_sinks')
self.sink_Ranked_Percentages_Zip =\
self.network.create_sink('ZipFile', id='RankedPercentages_Zip',
step_id='general_sinks')
self.sink_Ranked_Percentages_CSV =\
self.network.create_sink('CSVFile', id='RankedPercentages_CSV',
step_id='general_sinks')
self.sink_Ranked_Posteriors_Zip =\
self.network.create_sink('ZipFile', id='RankedPosteriors_Zip',
step_id='general_sinks')
self.sink_Ranked_Posteriors_CSV =\
self.network.create_sink('CSVFile', id='RankedPosteriors_CSV',
step_id='general_sinks')
self.sink_Boxplots_Features_Zip =\
self.network.create_sink('ZipFile', id='BoxplotsFeatures_Zip',
step_id='general_sinks')
# Create links to sinks
if self.modus == 'binary_classification':
self.sink_ROC_PNG.input = self.node_ROC.outputs['ROC_png']
self.sink_ROC_Tex.input = self.node_ROC.outputs['ROC_tex']
self.sink_ROC_CSV.input = self.node_ROC.outputs['ROC_csv']
self.sink_PRC_PNG.input = self.node_ROC.outputs['PRC_png']
self.sink_PRC_Tex.input = self.node_ROC.outputs['PRC_tex']
self.sink_PRC_CSV.input = self.node_ROC.outputs['PRC_csv']
if self.mode == 'StandAlone':
self.sink_Estimator_Json.input = self.node_Estimator.outputs['output_json']
self.sink_Barchart_PNG.input = self.node_Barchart.outputs['output_png']
self.sink_Barchart_Tex.input = self.node_Barchart.outputs['output_tex']
self.sink_Hyperparameters_CSV.input = self.node_Hyperparameters.outputs['output_csv']
if 'classification' in self.modus:
self.sink_STest_CSV.input = self.node_STest.outputs['output_csv']
self.sink_STest_PNG.input = self.node_STest.outputs['output_png']
self.sink_STest_Tex.input = self.node_STest.outputs['output_tex']
self.sink_decomposition_PNG.input = self.node_decomposition.outputs['output']
self.sink_Ranked_Percentages_Zip.input =\
self.node_Ranked_Percentages.outputs['output_zip']
self.sink_Ranked_Percentages_CSV.input =\
self.node_Ranked_Percentages.outputs['output_csv']
# Create constant node
self.node_Ranked_Percentages.inputs['scores'] = ['percentages']
self.sink_Ranked_Posteriors_Zip.input =\
self.node_Ranked_Posteriors.outputs['output_zip']
self.sink_Ranked_Posteriors_CSV.input =\
self.node_Ranked_Posteriors.outputs['output_csv']
self.sink_Boxplots_Features_Zip.input =\
self.node_Boxplots_Features.outputs['output_zip']
# Create constant node
self.node_Ranked_Posteriors.inputs['scores'] = ['posteriors']
if self.mode == 'StandAlone':
self.source_LabelType =\
self.network.create_constant('String', [self.label_type],
id='LabelType',
step_id='Evaluation')
self.source_Ensemble =\
self.network.create_constant('String', [self.ensemble],
id='Ensemble',
step_id='Evaluation')
# Create sources if not supplied by a WORC network
if self.mode == 'StandAlone':
self.source_Estimator =\
self.network.create_source('HDF5', id='Estimator')
self.source_PatientInfo =\
self.network.create_source('PatientInfoFile', id='PatientInfo')
self.source_Images =\
self.network.create_source('ITKImageFile', id='Images',
node_group='patients')
self.source_Segmentations =\
self.network.create_source('ITKImageFile', id='Segmentations',
node_group='patients')
self.source_Config =\
self.network.create_source('ParameterFile', id='Config')
self.labels = list()
self.source_Features = list()
for idx in range(0, len(self.features)):
label = 'Features_' + str(idx)
self.labels.append(label)
self.source_Features.append(self.network.create_source('HDF5', id=label, node_group='features'))
# Create links to the sources that could be in a WORC network
if self.mode == 'StandAlone':
self.create_links_Standalone()
else:
self.create_links_Addon()
def create_links_Standalone(self):
"""Create links in network between nodes when using standalone."""
# Sources from the Evaluate network are used
if self.modus == 'binary_classification':
self.node_ROC.inputs['prediction'] = self.source_Estimator.output
self.node_ROC.inputs['pinfo'] = self.source_PatientInfo.output
self.node_Estimator.inputs['prediction'] = self.source_Estimator.output
self.node_Estimator.inputs['pinfo'] = self.source_PatientInfo.output
self.node_Barchart.inputs['prediction'] = self.source_Estimator.output
self.node_Hyperparameters.inputs['prediction'] = self.source_Estimator.output
if 'classification' in self.modus:
self.links_STest_Features = list()
self.links_decomposition_Features = list()
self.links_Boxplots_Features = list()
for idx, label in enumerate(self.labels):
if 'classification' in self.modus:
self.links_STest_Features.append(self.node_STest.inputs['features'][str(label)] << self.source_Features[idx].output)
self.links_STest_Features[idx].collapse = 'features'
self.links_decomposition_Features.append(self.node_decomposition.inputs['features'][str(label)] << self.source_Features[idx].output)
self.links_decomposition_Features[idx].collapse = 'features'
self.links_Boxplots_Features.append(self.node_Boxplots_Features.inputs['features'][str(label)] << self.source_Features[idx].output)
self.links_Boxplots_Features[idx].collapse = 'features'
if 'classification' in self.modus:
self.node_STest.inputs['patientclass'] = self.source_PatientInfo.output
self.node_STest.inputs['config'] = self.source_Config.output
self.node_decomposition.inputs['patientclass'] = self.source_PatientInfo.output
self.node_decomposition.inputs['config'] = self.source_Config.output
self.node_Ranked_Percentages.inputs['estimator'] = self.source_Estimator.output
self.node_Ranked_Percentages.inputs['pinfo'] = self.source_PatientInfo.output
self.link_images_perc = self.network.create_link(self.source_Images.output, self.node_Ranked_Percentages.inputs['images'])
self.link_images_perc.collapse = 'patients'
self.link_segmentations_perc = self.network.create_link(self.source_Segmentations.output, self.node_Ranked_Percentages.inputs['segmentations'])
self.link_segmentations_perc.collapse = 'patients'
self.node_Boxplots_Features.inputs['patientclass'] = self.source_PatientInfo.output
self.node_Boxplots_Features.inputs['config'] = self.source_Config.output
self.node_Ranked_Posteriors.inputs['estimator'] = self.source_Estimator.output
self.node_Ranked_Posteriors.inputs['pinfo'] = self.source_PatientInfo.output
self.link_images_post = self.network.create_link(self.source_Images.output, self.node_Ranked_Posteriors.inputs['images'])
self.link_images_post.collapse = 'patients'
self.link_segmentations_post = self.network.create_link(self.source_Segmentations.output, self.node_Ranked_Posteriors.inputs['segmentations'])
self.link_segmentations_post.collapse = 'patients'
if self.modus == 'binary_classification':
self.node_ROC.inputs['ensemble'] = self.source_Ensemble.output
self.node_ROC.inputs['label_type'] = self.source_LabelType.output
if 'classification' in self.modus:
self.node_Ranked_Percentages.inputs['ensemble'] =\
self.source_Ensemble.output
self.node_Ranked_Percentages.inputs['label_type'] =\
self.source_LabelType.output
self.node_Estimator.inputs['ensemble'] = self.source_Ensemble.output
self.node_Estimator.inputs['label_type'] = self.source_LabelType.output
self.node_Barchart.inputs['estimators'] = self.source_Ensemble.output
self.node_Barchart.inputs['label_type'] = self.source_LabelType.output
self.node_Hyperparameters.inputs['estimators'] = self.source_Ensemble.output
self.node_Hyperparameters.inputs['label_type'] = self.source_LabelType.output
self.node_Ranked_Posteriors.inputs['ensemble'] =\
self.source_Ensemble.output
self.node_Ranked_Posteriors.inputs['label_type'] =\
self.source_LabelType.output
def create_links_Addon(self):
"""Create links in network between nodes when adding Evaluate to WORC."""
# Sources from the WORC network are used
prediction = self.parent.classify.outputs['classification']
if hasattr(self.parent, 'source_patientclass_test'):
pinfo = self.parent.source_patientclass_test.output
else:
pinfo = self.parent.source_patientclass_train.output
config = self.parent.source_class_config.output
if hasattr(self.parent, 'sources_images_train'):
if self.parent.sources_images_train:
# NOTE: Use images of first modality to depict tumor
label = self.parent.modlabels[0]
images = self.parent.sources_images_train[label].output
segmentations =\
self.parent.sources_segmentations_train[label].output
if self.modus == 'binary_classification':
self.node_ROC.inputs['ensemble'] = self.parent.source_Ensemble.output
self.node_ROC.inputs['label_type'] = self.parent.source_LabelType.output
if 'classification' in self.modus:
self.node_Ranked_Percentages.inputs['ensemble'] =\
self.parent.source_Ensemble.output
self.node_Ranked_Percentages.inputs['label_type'] =\
self.parent.source_LabelType.output
self.node_Barchart.inputs['estimators'] = self.parent.source_Ensemble.output
self.node_Barchart.inputs['label_type'] = self.parent.source_LabelType.output
self.node_Hyperparameters.inputs['estimators'] = self.parent.source_Ensemble.output
self.node_Hyperparameters.inputs['label_type'] = self.parent.source_LabelType.output
self.node_Ranked_Posteriors.inputs['ensemble'] =\
self.parent.source_Ensemble.output
self.node_Ranked_Posteriors.inputs['label_type'] =\
self.parent.source_LabelType.output
if self.modus == 'binary_classification':
self.node_ROC.inputs['prediction'] = prediction
self.node_ROC.inputs['pinfo'] = pinfo
self.node_Barchart.inputs['prediction'] = prediction
self.node_Hyperparameters.inputs['prediction'] = prediction
if 'classification' in self.modus:
self.links_STest_Features = dict()
self.links_decomposition_Features = dict()
self.links_Boxplots_Features = dict()
# Check if we have ComBat features
if self.parent.configs[0]['General']['ComBat'] == 'True':
name = 'ComBat'
# Take features from ComBat
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.network.create_link(self.parent.ComBat.outputs['features_train_out'], self.node_STest.inputs['features'])
self.links_decomposition_Features[name] =\
self.network.create_link(self.parent.ComBat.outputs['features_train_out'], self.node_decomposition.inputs['features'])
self.links_Boxplots_Features[name] =\
self.network.create_link(self.parent.ComBat.outputs['features_train_out'], self.node_Boxplots_Features.inputs['features'])
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'ComBat'
self.links_decomposition_Features[name].collapse = 'ComBat'
self.links_Boxplots_Features[name].collapse = 'ComBat'
else:
for idx, label in enumerate(self.parent.modlabels):
# NOTE: Currently statistical testing is only done within the training set
if hasattr(self.parent, 'sources_images_train'):
if self.parent.sources_images_train:
# Take features directly from feature computation toolboxes
for node in self.parent.featureconverter_train[label]:
name = node.id
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.node_STest.inputs['features'][name] << node.outputs['feat_out']
self.links_decomposition_Features[name] =\
self.node_decomposition.inputs['features'][name] << node.outputs['feat_out']
self.links_Boxplots_Features[name] =\
self.node_Boxplots_Features.inputs['features'][name] << node.outputs['feat_out']
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'train'
self.links_decomposition_Features[name].collapse = 'train'
self.links_Boxplots_Features[name].collapse = 'train'
else:
                        # Features are precomputed and given as sources
for node in self.parent.sources_features_train.values():
name = node.id
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.node_STest.inputs['features'][name] << node.output
self.links_decomposition_Features[name] =\
self.node_decomposition.inputs['features'][name] << node.output
self.links_Boxplots_Features[name] =\
self.node_Boxplots_Features.inputs['features'][name] << node.output
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'train'
self.links_decomposition_Features[name].collapse = 'train'
self.links_Boxplots_Features[name].collapse = 'train'
else:
                    # Features are precomputed and given as sources
for node in self.parent.sources_features_train.values():
name = node.id
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.node_STest.inputs['features'][name] << node.output
self.links_decomposition_Features[name] =\
self.node_decomposition.inputs['features'][name] << node.output
self.links_Boxplots_Features[name] =\
self.node_Boxplots_Features.inputs['features'][name] << node.output
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'train'
self.links_decomposition_Features[name].collapse = 'train'
self.links_Boxplots_Features[name].collapse = 'train'
if 'classification' in self.modus:
self.node_STest.inputs['patientclass'] = pinfo
self.node_STest.inputs['config'] = config
self.node_decomposition.inputs['patientclass'] = pinfo
self.node_decomposition.inputs['config'] = config
self.node_Ranked_Percentages.inputs['estimator'] = prediction
self.node_Ranked_Percentages.inputs['pinfo'] = pinfo
self.node_Boxplots_Features.inputs['patientclass'] = pinfo
self.node_Boxplots_Features.inputs['config'] = config
self.node_Ranked_Posteriors.inputs['estimator'] = prediction
self.node_Ranked_Posteriors.inputs['pinfo'] = pinfo
if hasattr(self.parent, 'sources_images_test'):
images = self.parent.sources_images_test[label].output
segmentations =\
self.parent.sources_segmentations_test[label].output
if 'classification' in self.modus:
self.link_images_perc =\
self.network.create_link(images, self.node_Ranked_Percentages.inputs['images'])
self.link_images_perc.collapse = 'test'
self.link_segmentations_perc =\
self.network.create_link(segmentations, self.node_Ranked_Percentages.inputs['segmentations'])
self.link_segmentations_perc.collapse = 'test'
self.link_images_post =\
self.network.create_link(images, self.node_Ranked_Posteriors.inputs['images'])
self.link_images_post.collapse = 'test'
self.link_segmentations_post =\
self.network.create_link(segmentations, self.node_Ranked_Posteriors.inputs['segmentations'])
self.link_segmentations_post.collapse = 'test'
elif hasattr(self.parent, 'sources_images_train'):
if self.parent.sources_images_train:
if 'classification' in self.modus:
self.link_images_perc =\
self.network.create_link(images, self.node_Ranked_Percentages.inputs['images'])
self.link_images_perc.collapse = 'train'
self.link_segmentations_perc =\
self.network.create_link(segmentations, self.node_Ranked_Percentages.inputs['segmentations'])
self.link_segmentations_perc.collapse = 'train'
self.link_images_post =\
self.network.create_link(images, self.node_Ranked_Posteriors.inputs['images'])
self.link_images_post.collapse = 'train'
self.link_segmentations_post =\
self.network.create_link(segmentations, self.node_Ranked_Posteriors.inputs['segmentations'])
self.link_segmentations_post.collapse = 'train'
def set(self, estimator=None, pinfo=None, images=None,
segmentations=None, config=None, features=None,
sink_data={}):
"""Set the sources and sinks based on the provided attributes."""
if self.mode == 'StandAlone':
self.source_data = dict()
self.sink_data = dict()
self.source_data['Estimator'] = estimator
self.source_data['PatientInfo'] = pinfo
self.source_data['Images'] = images
self.source_data['Segmentations'] = segmentations
self.source_data['Config'] = config
self.source_data['LabelType'] = self.label_type
self.source_data['Ensemble'] = self.ensemble
for feature, label in zip(features, self.labels):
self.source_data[label] = feature
else:
self.sink_data = self.parent.sink_data
if self.modus == 'binary_classification':
if 'ROC_PNG' not in sink_data.keys():
self.sink_data['ROC_PNG'] = ("vfs://output/{}/Evaluation/ROC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'ROC_Tex' not in sink_data.keys():
self.sink_data['ROC_Tex'] = ("vfs://output/{}/Evaluation/ROC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'ROC_CSV' not in sink_data.keys():
self.sink_data['ROC_CSV'] = ("vfs://output/{}/Evaluation/ROC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'PRC_PNG' not in sink_data.keys():
self.sink_data['PRC_PNG'] = ("vfs://output/{}/Evaluation/PRC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'PRC_Tex' not in sink_data.keys():
self.sink_data['PRC_Tex'] = ("vfs://output/{}/Evaluation/PRC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'PRC_CSV' not in sink_data.keys():
self.sink_data['PRC_CSV'] = ("vfs://output/{}/Evaluation/PRC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Estimator_Json' not in sink_data.keys():
self.sink_data['Estimator_Json'] = ("vfs://output/{}/Evaluation/performance_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Barchart_PNG' not in sink_data.keys():
self.sink_data['Barchart_PNG'] = ("vfs://output/{}/Evaluation/Barchart_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Barchart_Tex' not in sink_data.keys():
self.sink_data['Barchart_Tex'] = ("vfs://output/{}/Evaluation/Barchart_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Hyperparameters_CSV' not in sink_data.keys():
self.sink_data['Hyperparameters_CSV'] = ("vfs://output/{}/Evaluation/Hyperparameters_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'classification' in self.modus:
if 'StatisticalTestFeatures_CSV' not in sink_data.keys():
self.sink_data['StatisticalTestFeatures_CSV'] = ("vfs://output/{}/Evaluation/StatisticalTestFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'StatisticalTestFeatures_PNG' not in sink_data.keys():
self.sink_data['StatisticalTestFeatures_PNG'] = ("vfs://output/{}/Evaluation/StatisticalTestFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'StatisticalTestFeatures_Tex' not in sink_data.keys():
self.sink_data['StatisticalTestFeatures_Tex'] = ("vfs://output/{}/Evaluation/StatisticalTestFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Decomposition_PNG' not in sink_data.keys():
self.sink_data['Decomposition_PNG'] = ("vfs://output/{}/Evaluation/Decomposition_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPercentages_Zip' not in sink_data.keys():
self.sink_data['RankedPercentages_Zip'] = ("vfs://output/{}/Evaluation/RankedPercentages_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPercentages_CSV' not in sink_data.keys():
self.sink_data['RankedPercentages_CSV'] = ("vfs://output/{}/Evaluation/RankedPercentages_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPosteriors_Zip' not in sink_data.keys():
self.sink_data['RankedPosteriors_Zip'] = ("vfs://output/{}/Evaluation/RankedPosteriors_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPosteriors_CSV' not in sink_data.keys():
self.sink_data['RankedPosteriors_CSV'] = ("vfs://output/{}/Evaluation/RankedPosteriors_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'BoxplotsFeatures_Zip' not in sink_data.keys():
self.sink_data['BoxplotsFeatures_Zip'] = ("vfs://output/{}/Evaluation/BoxplotsFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
def execute(self):
"""Execute the network through the fastr.network.execute command."""
        # Draw and execute network
try:
self.network.draw(file_path=self.network.id + '.svg',
draw_dimensions=True)
except graphviz.backend.ExecutableNotFound:
            print("[WORC WARNING] Graphviz executable not found: not drawing network diagram. Make sure the Graphviz executables are on your system's PATH.")
self.network.execute(self.source_data, self.sink_data,
execution_plugin=self.fastr_plugin,
tmpdir=self.fastr_tmpdir)
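# --- Hedged usage sketch (not part of the original module) ---
# How the standalone Evaluate network is typically driven, based only on the
# __init__, set() and execute() signatures above. All file paths and the
# label name below are placeholder assumptions, not values from this repo.
if __name__ == "__main__":
    features = ['features_patient1.hdf5', 'features_patient2.hdf5']
    evaluation = Evaluate(label_type='Label1',
                          modus='binary_classification',
                          features=features,
                          name='ExampleRun')
    evaluation.set(estimator='estimator.hdf5',
                   pinfo='pinfo.txt',
                   images=['image1.nii.gz', 'image2.nii.gz'],
                   segmentations=['seg1.nii.gz', 'seg2.nii.gz'],
                   config='config.ini',
                   features=features)
    evaluation.execute()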
|
py | 1a4c0473eeaef09e1ec253cf7548bb3e05d13884 | from .regularize import set_regularizer |
py | 1a4c04c0fd6675f304983998c76cfa734e45b393 | import numpy as np
import torch
import torch.nn as nn
class CNNCTC(nn.Module):
def __init__(self, class_num, mode='train'):
super(CNNCTC, self).__init__()
feature = [
nn.Conv2d(3, 50, stride=1, kernel_size=3, padding=1),
nn.BatchNorm2d(50),
nn.ReLU(inplace=True),
nn.Conv2d(50, 100, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.1),
nn.Conv2d(100, 100, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.1),
nn.BatchNorm2d(100),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(100, 200, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.2),
nn.Conv2d(200, 200, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.2),
nn.BatchNorm2d(200),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(200, 250, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.3),
nn.BatchNorm2d(250),
nn.ReLU(inplace=True),
nn.Conv2d(250, 300, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.3),
nn.Conv2d(300, 300, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.3),
nn.BatchNorm2d(300),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(300, 350, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.4),
nn.BatchNorm2d(350),
nn.ReLU(inplace=True),
nn.Conv2d(350, 400, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.4),
nn.Conv2d(400, 400, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.4),
nn.BatchNorm2d(400),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2)
]
classifier = [
nn.Linear(1600, 900),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
# nn.Linear(900, 200),
# nn.ReLU(inplace=True),
nn.Linear(900, class_num)
]
self.mode = mode
self.feature = nn.Sequential(*feature)
self.classifier = nn.Sequential(*classifier)
def forward(self, x): # x: batch, window, slice channel, h, w
result = []
for s in range(x.shape[1]):
result.append(self.single_forward(x[:, s, :, :, :]))
out = torch.stack(result)
if self.mode != 'train':
return self.decode(out)
return out
def single_forward(self, x):
feat = self.feature(x)
feat = feat.view(feat.shape[0], -1) # flatten
out = self.classifier(feat)
return out
def decode(self, pred):
pred = pred.permute(1, 0, 2).cpu().data.numpy() # batch, step, class
seq = []
for i in range(pred.shape[0]):
seq.append(self.pred_to_string(pred[i]))
return seq
@staticmethod
def pred_to_string(pred): # step, class
seq = []
for i in range(pred.shape[0]):
label = np.argmax(pred[i])
seq.append(label)
out = []
for i in range(len(seq)):
if len(out) == 0:
if seq[i] != 0:
out.append(seq[i])
else:
if seq[i] != 0 and seq[i] != seq[i - 1]:
out.append(seq[i])
return out
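# --- Hedged illustration (not part of the original module) ---
# pred_to_string above performs a greedy CTC collapse: per-step argmax,
# then blanks (label 0) are dropped and consecutive repeats are merged.
# A toy check with 5 time steps and 4 classes (class 0 = CTC blank):
if __name__ == "__main__":
    toy_pred = np.array([
        [0.9, 0.0, 0.1, 0.0],   # blank
        [0.1, 0.8, 0.1, 0.0],   # class 1
        [0.1, 0.8, 0.1, 0.0],   # class 1 repeated -> merged
        [0.9, 0.0, 0.1, 0.0],   # blank
        [0.0, 0.1, 0.0, 0.9],   # class 3
    ])
    assert CNNCTC.pred_to_string(toy_pred) == [1, 3]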
|
py | 1a4c04f82e085fd41d0891fc51e5db05097d51c6 | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/
import typing as ty
import numpy as np
import numpy.typing as npt
class CoefficientTensorsMixin:
def __init__(self, *coefficients: ty.Union[ty.List, npt.ArrayLike]):
"""Coefficients for a scalar function of a vector.
Parameters
----------
coefficients: the tensor coefficients of the function.
"""
c_dict = dict()
for coefficient in coefficients:
if type(coefficient) in [list, int]:
coefficient = np.asarray(coefficient)
rank = coefficient.ndim
elif type(coefficient) is not np.ndarray:
raise ValueError(
"Coefficients should be either Numpy arrays "
"or (possibly nested) lists."
)
else:
rank = coefficient.ndim
c_dict[rank] = coefficient
self._coefficients = c_dict
@property
def coefficients(self):
return self._coefficients
@coefficients.setter
def coefficients(self, value):
self._coefficients = value
def get_coefficient(self, order: int):
try:
return self.coefficients[order]
except KeyError:
print(
f"""Order {order} not found, coefficients were only given for
orders: {list(self.coefficients.keys())}."""
)
raise
@property
def max_degree(self):
"""Maximum order among the coefficients' ranks."""
return max(self.coefficients.keys())
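# --- Hedged usage sketch (not part of the original module) ---
# Coefficients are stored keyed by their tensor rank, e.g. a rank-2 matrix
# and a rank-1 vector for a quadratic cost. The values below are made up.
if __name__ == "__main__":
    q = np.array([[2, 1], [1, 2]])   # rank-2 coefficient
    b = np.array([1, -1])            # rank-1 coefficient
    mixin = CoefficientTensorsMixin(q, b)
    assert mixin.max_degree == 2
    assert (mixin.get_coefficient(order=1) == b).all()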
|
py | 1a4c05637a2fca287f7780be726b414a86cc1bdf |
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import tarfile
from six.moves import urllib
import numpy as np
from tensorflow.python.platform import gfile
import tensorflow as tf
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
def maybe_download(directory, filename, url):
"""Download filename from url unless it's already in directory."""
if not os.path.exists(directory):
print("Creating directory %s" % directory)
os.mkdir(directory)
filepath = os.path.join(directory, filename)
if not os.path.exists(filepath):
print("Downloading %s to %s" % (url, filepath))
filepath, _ = urllib.request.urlretrieve(url, filepath)
statinfo = os.stat(filepath)
print("Succesfully downloaded", filename, statinfo.st_size, "bytes")
return filepath
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
print("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def get_wmt_enfr_train_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
train_path = os.path.join(directory, "train")
return train_path
def get_wmt_enfr_dev_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
dev_name = "validate"
dev_path = os.path.join(directory, dev_name)
return dev_path
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
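# Hedged example (not in the original file): basic_tokenizer splits on
# whitespace and then on punctuation, e.g.
#   basic_tokenizer(b"Hello, world!") -> [b"Hello", b",", b"world", b"!"]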
def create_vocabulary_source(vocabulary_path, data_path, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that
  the token on the first line gets id=0, the token on the second line gets
  id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
print(" processing line %d" % counter)
        facts = eval(line)
        for fact in facts:
for w in fact:
word = w.encode('UTF-8')
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
def create_vocabulary_target(vocabulary_path, data_path, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that
  the token on the first line gets id=0, the token on the second line gets
  id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
print(" processing line %d" % counter)
tokens=eval(line)
for w in tokens:
word = w.replace('\n', '\\n')
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line[:-1] for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary,
tokenizer=None, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
v = [vocabulary.get(w.encode('UTF-8'), UNK_ID) for w in sentence]
return v
def data_to_token_ids_source(data_path, target_path, vocabulary_path,
tokenizer=None, normalize_digits=False):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
fvs=[]
for fv in eval(line):
token_ids = sentence_to_token_ids(fv, vocab, tokenizer,
normalize_digits)
fvs.append(token_ids)
tokens_file.write(str(fvs) + "\n")
def data_to_token_ids_target(data_path, target_path, vocabulary_path,
tokenizer=None, normalize_digits=False):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
line = line.replace('\\n', '\\\\n')
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(eval(line), vocab, tokenizer,
normalize_digits)
tokens_file.write(str(token_ids) + "\n")
def prepare_data(data_dir, en_vocabulary_size, fr_vocabulary_size, tokenizer=None):
"""Get WMT data into data_dir, create vocabularies and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
en_vocabulary_size: size of the English vocabulary to create and use.
fr_vocabulary_size: size of the French vocabulary to create and use.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
Returns:
A tuple of 6 elements:
    (1) path to the token-ids for the "questions" training data-set,
    (2) path to the token-ids for the "answers" training data-set,
    (3) path to the token-ids for the "questions" development data-set
        (identical to (1) in this setup),
    (4) path to the token-ids for the "answers" development data-set
        (identical to (2) in this setup),
    (5) path to the "questions" vocabulary file,
    (6) path to the "answers" vocabulary file.
"""
# Get wmt data to the specified directory.
train_path = get_wmt_enfr_train_set(data_dir)
dev_path = get_wmt_enfr_dev_set(data_dir)
# Create vocabularies of the appropriate sizes.
fr_vocab_path = os.path.join(data_dir, "vocab%d.answers" % fr_vocabulary_size)
en_vocab_path = os.path.join(data_dir, "vocab%d.questions" % en_vocabulary_size)
create_vocabulary_source(en_vocab_path, train_path + ".questions", en_vocabulary_size, tokenizer)
create_vocabulary_target(fr_vocab_path, train_path + ".answers", fr_vocabulary_size, tokenizer)
# Create token ids for the training data.
fr_train_ids_path = train_path + (".ids%d.answers" % fr_vocabulary_size)
en_train_ids_path = train_path + (".ids%d.questions" % en_vocabulary_size)
data_to_token_ids_target(train_path + ".answers", fr_train_ids_path, fr_vocab_path, tokenizer)
data_to_token_ids_source(train_path + ".questions", en_train_ids_path, en_vocab_path, tokenizer)
return (en_train_ids_path, fr_train_ids_path,
en_train_ids_path, fr_train_ids_path,
en_vocab_path, fr_vocab_path)
def get_lens(inputs, split_sentences=False):
lens = np.zeros((len(inputs)), dtype=int)
for i, t in enumerate(inputs):
lens[i] = t.shape[0]
return lens
def get_sentence_lens(inputs):
lens = np.zeros((len(inputs)), dtype=int)
sen_lens = []
max_sen_lens = []
for i, t in enumerate(inputs):
sentence_lens = np.zeros((len(t)), dtype=int)
for j, s in enumerate(t):
sentence_lens[j] = len(s)
lens[i] = len(t)
sen_lens.append(sentence_lens)
max_sen_lens.append(np.max(sentence_lens))
return lens, sen_lens, max(max_sen_lens)
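# Hedged example (not in the original file): for a single "document" made of
# two sentences, inputs = [[[1, 2], [3]]], get_sentence_lens returns
# lens = [2], sen_lens = [array([2, 1])] and max_sen_len = 2.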
def pad_inputs(inputs, lens, max_len, mode="", sen_lens=None, max_sen_len=None):
if mode == "mask":
padded = [np.pad(inp, (0, max_len - lens[i]), 'constant', constant_values=PAD_ID) for i, inp in enumerate(inputs)]
return np.vstack(padded)
elif mode == "split_sentences":
padded = np.zeros((len(inputs), max_len, max_sen_len))
for i, inp in enumerate(inputs):
padded_sentences = [np.pad(s, (0, max_sen_len - sen_lens[i][j]), 'constant', constant_values=PAD_ID) for j, s in enumerate(inp)]
# trim array according to max allowed inputs
if len(padded_sentences) > max_len:
padded_sentences = padded_sentences[(len(padded_sentences)-max_len):]
lens[i] = max_len
padded_sentences = np.vstack(padded_sentences)
padded_sentences = np.pad(padded_sentences, ((0, max_len - lens[i]),(0,0)), 'constant', constant_values=PAD_ID)
padded[i] = padded_sentences
return padded
padded = [np.pad(np.squeeze(inp, axis=1), (0, max_len - lens[i]), 'constant', constant_values=PAD_ID) for i, inp in enumerate(inputs)]
return np.vstack(padded)
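# Hedged example (not in the original file): in "mask" mode pad_inputs
# right-pads each ragged sequence with PAD_ID up to max_len, e.g.
#   pad_inputs([np.array([5, 6]), np.array([7])], [2, 1], 3, mode="mask")
#   -> array([[5, 6, 0],
#             [7, 0, 0]])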
def read_data(source_path, target_path, max_size=None):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
    max_size: maximum number of lines to read; all others will be ignored;
      if 0 or None, data files will be read completely (no limit).
  Returns:
    sources: a list of numpy arrays holding the source token-ids, one per line.
    targets: a list of numpy arrays holding the target token-ids, one per line,
      each wrapped in the GO_ID and EOS_ID markers.
"""
sources = []
targets = []
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
count=0
while source and target:
count+=1
#print (count)
sources.append(np.array(eval(source)))
targets.append(np.array([GO_ID]+eval(target)+[EOS_ID]))
source, target = source_file.readline(), target_file.readline()
return sources, targets
def pad_length_bucket(source, targets, config, split_sentences=True):
inputs = source
if split_sentences:
input_lens, sen_lens, max_sen_len = get_sentence_lens(inputs)
max_mask_len = max_sen_len
else:
input_lens = get_lens(inputs)
t_lens = get_lens(targets)
max_t_len = np.max(t_lens)
max_input_len = min(np.max(input_lens), config.max_allowed_inputs)
#pad out arrays to max
if split_sentences:
inputs = pad_inputs(inputs, input_lens, max_input_len, "split_sentences", sen_lens, max_sen_len)
input_masks = np.zeros(len(inputs))
else:
inputs = pad_inputs(inputs, input_lens, max_input_len)
        # NOTE: the original line referenced undefined names (input_masks,
        # mask_lens, max_mask_len); fall back to an empty mask per example.
        input_masks = np.zeros(len(inputs))
        max_mask_len = max_input_len
targets = pad_inputs(targets, t_lens, max_t_len, "mask")
if config.train_mode:
train = targets[:config.num_train], inputs[:config.num_train], t_lens[:config.num_train], input_lens[:config.num_train], input_masks[:config.num_train]
valid = targets[config.num_train:], inputs[config.num_train:], t_lens[config.num_train:], input_lens[config.num_train:], input_masks[config.num_train:]
return train, valid, max_t_len, max_input_len, max_mask_len
else:
test = targets, inputs, t_lens, input_lens, input_masks, answers, rel_labels
return test, max_t_len, max_input_len, max_mask_len
def get_vocab_size(vocab_path):
with tf.gfile.GFile(vocab_path, mode="r") as vocab_file:
vocab_line = vocab_file.readline()
count=0
while vocab_line:
count+=1
vocab_line = vocab_file.readline()
print(count)
return count
|
py | 1a4c0680fed14be9b9a5a2339c01b3893078d432 | from boto.s3.connection import S3Connection
from storages.backends.s3boto import S3BotoStorage
import os
import math
from multiprocessing.pool import Pool
def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
source_path, offset, bytes, amount_of_retries=10):
"""
Uploads a part with retries.
"""
def _upload(retries_left=amount_of_retries):
try:
conn = S3Connection(aws_key, aws_secret)
bucket = conn.get_bucket(bucketname)
for mp in bucket.get_all_multipart_uploads():
if mp.id == multipart_id:
with open(source_path, 'rb') as fp:
fp.seek(offset)
mp.upload_part_from_file(fp=fp, part_num=part_num, size=bytes)
break
        except Exception as exc:
if retries_left:
_upload(retries_left=retries_left - 1)
else:
print('... Failed uploading part #%d' % part_num)
raise exc
_upload()
class ThreadedS3BotoStorage(S3BotoStorage):
def _save_content(self, key, content, headers):
print "key.name", key.name
source_size = os.stat(content.file.name).st_size
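        # Part size grows with the square root of the file size but never
        # drops below 5242880 bytes (5 MiB), S3's minimum multipart part size.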
bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)), 5242880)
chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
mp = self.bucket.initiate_multipart_upload(key.name, headers=headers)
pool = Pool(processes=1)
for i in range(chunk_amount):
offset = i * bytes_per_chunk
remaining_bytes = source_size - offset
bytes = min([bytes_per_chunk, remaining_bytes])
part_num = i + 1
pool.apply_async(_upload_part, [self.bucket_name, self.access_key,
self.secret_key, mp.id, part_num,
content.file.name, offset, bytes])
pool.close()
pool.join()
if len(mp.get_all_parts()) == chunk_amount:
mp.complete_upload()
key = self.bucket.get_key(key.name)
else:
mp.cancel_upload()
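# Hedged usage note (not part of the original module): with django-storages
# this backend would typically be selected via Django settings, e.g.
#   DEFAULT_FILE_STORAGE = 'myapp.storage.ThreadedS3BotoStorage'
# where 'myapp.storage' is a placeholder module path.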
|
py | 1a4c09885c16aad172458ca745ad1b43664f136b | import logging
import logconfig
logconfig.logconfig(filename=None)
logconfig.loglevel(logging.INFO)
import squaregrid
from sde import *
def test_sde():
def c(z):
return 2.0*z*z*z - 1j*z + 0.2
gr = squaregrid.SquareGrid(3.0,255)
def report(Q):
"""Print data about solution in SelfDualityEquation object Q"""
j = int(gr.ny / 2)
for i in range(0,gr.nx,gr.nx // 10):
z = Q.grid.zm[j,i]
u = Q.u[j,i]
u0 = Q.u0[j,i]
print('u(%g%+gi) = \t%f (diff from uzero is %f)' % (z.real,z.imag,u,u-u0))
print("----------------------------------------------------------------------")
print(" FOURIER METHOD")
print("----------------------------------------------------------------------")
global QF
QF = SelfDualityEquation(3,c,gr,method='fourier')
report(QF)
print("----------------------------------------------------------------------")
print(" EULER METHOD")
print("----------------------------------------------------------------------")
global QE
QE = SelfDualityEquation(3,c,gr,method='euler')
report(QE)
|
py | 1a4c0a53aa9d367c45e245db82923acc514b6dc3 | import numpy as np
import os
import pytest
import tempfile
import torch
from mmcv.parallel import MMDataParallel
from os.path import dirname, exists, join
from mmdet3d.apis import (convert_SyncBN, inference_detector,
inference_mono_3d_detector,
inference_multi_modality_detector,
inference_segmentor, init_model, show_result_meshlab,
single_gpu_test)
from mmdet3d.core import Box3DMode
from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
LiDARInstance3DBoxes)
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_model
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection3d repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet3d
repo_dpath = dirname(dirname(mmdet3d.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def test_convert_SyncBN():
cfg = _get_config_module(
'pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py')
model_cfg = cfg.model
convert_SyncBN(model_cfg)
assert model_cfg['pts_voxel_encoder']['norm_cfg']['type'] == 'BN1d'
assert model_cfg['pts_backbone']['norm_cfg']['type'] == 'BN2d'
assert model_cfg['pts_neck']['norm_cfg']['type'] == 'BN2d'
def test_show_result_meshlab():
pcd = 'tests/data/nuscenes/samples/LIDAR_TOP/n015-2018-08-02-17-16-37+' \
'0800__LIDAR_TOP__1533201470948018.pcd.bin'
box_3d = LiDARInstance3DBoxes(
torch.tensor(
[[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
labels_3d = torch.tensor([0])
scores_3d = torch.tensor([0.5])
points = np.random.rand(100, 4)
img_meta = dict(
pts_filename=pcd, boxes_3d=box_3d, box_mode_3d=Box3DMode.LIDAR)
data = dict(points=[[torch.tensor(points)]], img_metas=[[img_meta]])
result = [
dict(
pts_bbox=dict(
boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d))
]
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
out_dir, file_name = show_result_meshlab(data, result, temp_out_dir)
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
tmp_dir.cleanup()
# test multi-modality show
# indoor scene
pcd = 'tests/data/sunrgbd/points/000001.bin'
filename = 'tests/data/sunrgbd/sunrgbd_trainval/image/000001.jpg'
box_3d = DepthInstance3DBoxes(
torch.tensor(
[[-1.1580, 3.3041, -0.9961, 0.3829, 0.4647, 0.5574, 1.1213]]))
img = np.random.randn(1, 3, 608, 832)
k_mat = np.array([[529.5000, 0.0000, 365.0000],
[0.0000, 529.5000, 265.0000], [0.0000, 0.0000, 1.0000]])
rt_mat = np.array([[0.9980, 0.0058, -0.0634], [0.0058, 0.9835, 0.1808],
[0.0634, -0.1808, 0.9815]])
rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) @ rt_mat.transpose(
1, 0)
depth2img = k_mat @ rt_mat
img_meta = dict(
filename=filename,
depth2img=depth2img,
pcd_horizontal_flip=False,
pcd_vertical_flip=False,
box_mode_3d=Box3DMode.DEPTH,
box_type_3d=DepthInstance3DBoxes,
pcd_trans=np.array([0., 0., 0.]),
pcd_scale_factor=1.0,
pts_filename=pcd,
transformation_3d_flow=['R', 'S', 'T'])
data = dict(
points=[[torch.tensor(points)]], img_metas=[[img_meta]], img=[img])
result = [dict(boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d)]
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
out_dir, file_name = show_result_meshlab(
data, result, temp_out_dir, 0.3, task='multi_modality-det')
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_png = file_name + '_img.png'
expected_outfile_proj = file_name + '_pred.png'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
expected_outfile_png_path = os.path.join(out_dir, file_name,
expected_outfile_png)
expected_outfile_proj_path = os.path.join(out_dir, file_name,
expected_outfile_proj)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
assert os.path.exists(expected_outfile_png_path)
assert os.path.exists(expected_outfile_proj_path)
tmp_dir.cleanup()
# outdoor scene
pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin'
filename = 'tests/data/kitti/training/image_2/000000.png'
box_3d = LiDARInstance3DBoxes(
torch.tensor(
[[6.4495, -3.9097, -1.7409, 1.5063, 3.1819, 1.4716, 1.8782]]))
img = np.random.randn(1, 3, 384, 1280)
lidar2img = np.array(
[[6.09695435e+02, -7.21421631e+02, -1.25125790e+00, -1.23041824e+02],
[1.80384201e+02, 7.64479828e+00, -7.19651550e+02, -1.01016693e+02],
[9.99945343e-01, 1.24365499e-04, 1.04513029e-02, -2.69386917e-01],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
img_meta = dict(
filename=filename,
pcd_horizontal_flip=False,
pcd_vertical_flip=False,
box_mode_3d=Box3DMode.LIDAR,
box_type_3d=LiDARInstance3DBoxes,
pcd_trans=np.array([0., 0., 0.]),
pcd_scale_factor=1.0,
pts_filename=pcd,
lidar2img=lidar2img)
data = dict(
points=[[torch.tensor(points)]], img_metas=[[img_meta]], img=[img])
result = [
dict(
pts_bbox=dict(
boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d))
]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_out_dir = tmp_dir.name
    out_dir, file_name = show_result_meshlab(
        data, result, temp_out_dir, 0.1, task='multi_modality-det')
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_png = file_name + '_img.png'
expected_outfile_proj = file_name + '_pred.png'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
expected_outfile_png_path = os.path.join(out_dir, file_name,
expected_outfile_png)
expected_outfile_proj_path = os.path.join(out_dir, file_name,
expected_outfile_proj)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
assert os.path.exists(expected_outfile_png_path)
assert os.path.exists(expected_outfile_proj_path)
tmp_dir.cleanup()
# test mono-3d show
filename = 'tests/data/nuscenes/samples/CAM_BACK_LEFT/n015-2018-' \
'07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg'
box_3d = CameraInstance3DBoxes(
torch.tensor(
[[6.4495, -3.9097, -1.7409, 1.5063, 3.1819, 1.4716, 1.8782]]))
img = np.random.randn(1, 3, 384, 1280)
cam_intrinsic = np.array([[100.0, 0.0, 50.0], [0.0, 100.0, 50.0],
[0.0, 0.0, 1.0]])
img_meta = dict(
filename=filename,
pcd_horizontal_flip=False,
pcd_vertical_flip=False,
box_mode_3d=Box3DMode.CAM,
box_type_3d=CameraInstance3DBoxes,
pcd_trans=np.array([0., 0., 0.]),
pcd_scale_factor=1.0,
cam_intrinsic=cam_intrinsic)
data = dict(
points=[[torch.tensor(points)]], img_metas=[[img_meta]], img=[img])
result = [
dict(
img_bbox=dict(
boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d))
]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_out_dir = tmp_dir.name
    out_dir, file_name = show_result_meshlab(
        data, result, temp_out_dir, 0.1, task='mono-det')
expected_outfile_png = file_name + '_img.png'
expected_outfile_proj = file_name + '_pred.png'
expected_outfile_png_path = os.path.join(out_dir, file_name,
expected_outfile_png)
expected_outfile_proj_path = os.path.join(out_dir, file_name,
expected_outfile_proj)
assert os.path.exists(expected_outfile_png_path)
assert os.path.exists(expected_outfile_proj_path)
tmp_dir.cleanup()
# test seg show
pcd = 'tests/data/scannet/points/scene0000_00.bin'
points = np.random.rand(100, 6)
img_meta = dict(pts_filename=pcd)
data = dict(points=[[torch.tensor(points)]], img_metas=[[img_meta]])
pred_seg = torch.randint(0, 20, (100, ))
result = [dict(semantic_mask=pred_seg)]
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
out_dir, file_name = show_result_meshlab(
data, result, temp_out_dir, task='seg')
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
tmp_dir.cleanup()


def test_inference_detector():
pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin'
detector_cfg = 'configs/pointpillars/hv_pointpillars_secfpn_' \
'6x8_160e_kitti-3d-3class.py'
detector = init_model(detector_cfg, device='cpu')
results = inference_detector(detector, pcd)
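    # inference_detector is expected to return a (result, data) pair here, so
    # results[0] is the per-sample result list and results[0][0] is the first
    # sample's prediction dict (as the indexing below relies on).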
bboxes_3d = results[0][0]['boxes_3d']
scores_3d = results[0][0]['scores_3d']
labels_3d = results[0][0]['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0


def test_inference_multi_modality_detector():
# these two multi-modality models both only have GPU implementations
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
# indoor scene
pcd = 'tests/data/sunrgbd/points/000001.bin'
img = 'tests/data/sunrgbd/sunrgbd_trainval/image/000001.jpg'
ann_file = 'tests/data/sunrgbd/sunrgbd_infos.pkl'
detector_cfg = 'configs/imvotenet/imvotenet_stage2_'\
'16x8_sunrgbd-3d-10class.py'
detector = init_model(detector_cfg, device='cuda:0')
results = inference_multi_modality_detector(detector, pcd, img, ann_file)
bboxes_3d = results[0][0]['boxes_3d']
scores_3d = results[0][0]['scores_3d']
labels_3d = results[0][0]['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0
# outdoor scene
pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin'
img = 'tests/data/kitti/training/image_2/000000.png'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
detector_cfg = 'configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_' \
'2x8_80e_kitti-3d-3class.py'
detector = init_model(detector_cfg, device='cuda:0')
results = inference_multi_modality_detector(detector, pcd, img, ann_file)
bboxes_3d = results[0][0]['pts_bbox']['boxes_3d']
scores_3d = results[0][0]['pts_bbox']['scores_3d']
labels_3d = results[0][0]['pts_bbox']['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0


def test_inference_mono_3d_detector():
# FCOS3D only has GPU implementations
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
img = 'tests/data/nuscenes/samples/CAM_BACK_LEFT/' \
'n015-2018-07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg'
ann_file = 'tests/data/nuscenes/nus_infos_mono3d.coco.json'
detector_cfg = 'configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_' \
'2x8_1x_nus-mono3d.py'
detector = init_model(detector_cfg, device='cuda:0')
results = inference_mono_3d_detector(detector, img, ann_file)
bboxes_3d = results[0][0]['img_bbox']['boxes_3d']
scores_3d = results[0][0]['img_bbox']['scores_3d']
labels_3d = results[0][0]['img_bbox']['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 9
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0


def test_inference_segmentor():
# PN2 only has GPU implementations
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
pcd = 'tests/data/scannet/points/scene0000_00.bin'
segmentor_cfg = 'configs/pointnet2/pointnet2_ssg_' \
'16x2_cosine_200e_scannet_seg-3d-20class.py'
segmentor = init_model(segmentor_cfg, device='cuda:0')
results = inference_segmentor(segmentor, pcd)
seg_3d = results[0][0]['semantic_mask']
assert seg_3d.shape == torch.Size([100])
assert seg_3d.min() >= 0
assert seg_3d.max() <= 19


def test_single_gpu_test():
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
cfg = _get_config_module('votenet/votenet_16x8_sunrgbd-3d-10class.py')
cfg.model.train_cfg = None
model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
dataset_cfg = cfg.data.test
dataset_cfg.data_root = './tests/data/sunrgbd'
dataset_cfg.ann_file = 'tests/data/sunrgbd/sunrgbd_infos.pkl'
dataset = build_dataset(dataset_cfg)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=False,
shuffle=False)
model = MMDataParallel(model, device_ids=[0])
results = single_gpu_test(model, data_loader)
bboxes_3d = results[0]['boxes_3d']
scores_3d = results[0]['scores_3d']
labels_3d = results[0]['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0
|
py | 1a4c0a6ff23ace4bc938b6886ddf8d81b763dbab | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: guest_smtp_notification_settings_info
short_description: Information module for Guest Smtp Notification Settings
description:
- Get all Guest Smtp Notification Settings.
- Get Guest Smtp Notification Settings by id.
version_added: '1.0.0'
author: Rafael Campos (@racampos)
options:
id:
description:
- Id path parameter.
type: str
page:
description:
- Page query parameter. Page number.
type: int
size:
description:
- Size query parameter. Number of objects returned per page.
type: int
sortasc:
description:
- Sortasc query parameter. Sort asc.
type: str
sortdsc:
description:
- Sortdsc query parameter. Sort desc.
type: str
filter:
description:
- >
Filter query parameter. <br/> **Simple filtering** should be available through the filter query string
parameter. The structure of a filter is a triplet of field operator and value separated with dots. More than
      one filter can be sent. The logical operator common to ALL filter criteria is AND by default, and can
      be changed by using the "filterType=or" query string parameter. Each resource data model description should
specify if an attribute is a filtered field. <br/> Operator | Description <br/>
------------|----------------- <br/> EQ | Equals <br/> NEQ | Not Equals <br/> GT | Greater Than <br/> LT |
      Less Than <br/> STARTSW | Starts With <br/> NSTARTSW | Not Starts With <br/> ENDSW | Ends With <br/> NENDSW
| Not Ends With <br/> CONTAINS | Contains <br/> NCONTAINS | Not Contains <br/>.
type: list
filterType:
description:
- >
      FilterType query parameter. The logical operator common to ALL filter criteria is AND by default, and
      can be changed to OR by setting this parameter.
type: str
requirements:
- ciscoisesdk
seealso:
# Reference by Internet resource
- name: Guest Smtp Notification Settings reference
description: Complete reference of the Guest Smtp Notification Settings object model.
link: https://ciscoisesdk.readthedocs.io/en/latest/api/api.html#v3-0-0-summary
"""
EXAMPLES = r"""
- name: Get all Guest Smtp Notification Settings
cisco.ise.guest_smtp_notification_settings_info:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
page: 1
size: 20
sortasc: string
sortdsc: string
filter: []
filterType: AND
register: result
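# Illustrative sketch only: the "field.OPERATOR.value" triplet filter syntax
# described in the options above, with filterType switching the combining
# logic to OR. The field name "name" is an assumption for illustration and may
# not exist in this module's data model.
- name: Get Guest Smtp Notification Settings using a filter (sketch)
  cisco.ise.guest_smtp_notification_settings_info:
    ise_hostname: "{{ise_hostname}}"
    ise_username: "{{ise_username}}"
    ise_password: "{{ise_password}}"
    ise_verify: "{{ise_verify}}"
    filter:
    - name.CONTAINS.smtp
    filterType: OR
  register: result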
- name: Get Guest Smtp Notification Settings by id
cisco.ise.guest_smtp_notification_settings_info:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
id: string
register: result
"""
RETURN = r"""
ise_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
type: dict
sample: >
{
"id": "string",
"smtpServer": "string",
"notificationEnabled": true,
"useDefaultFromAddress": true,
"defaultFromAddress": "string",
"smtpPort": "string",
"connectionTimeout": "string",
"useTLSorSSLEncryption": true,
"usePasswordAuthentication": true,
"userName": "string",
"password": "string",
"link": {
"rel": "string",
"href": "string",
"type": "string"
}
}
"""
|
py | 1a4c0acbc56bccfc1fa286b33ae0517ae66a2c8d | ### @export "setup"
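# Test-harness setup (not part of the exercise code below): fake_input appears
# to swap the built-in input() for scripted responses so the script can run
# unattended; the double "input, input" unpacking mirrors the original
# Python 2 "raw_input, input" pair and is left unchanged here.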
import fake_input
input, input = fake_input.create(['', 'Mary had a little lamb',
'Its fleece was white as snow',
'It was also tasty'])
### @export "code"
from sys import argv
script, filename = argv
print(f"We're going to erase {filename}.")
print("If you don't want that, hit CTRL-C (^C).")
print("If you do want that, hit RETURN.")
input("?")
print("Opening the file...")
target = open(filename, 'w')
print("Truncating the file. Goodbye!")
target.truncate()
print("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")
print("I'm going to write these to the file.")
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print("And finally, we close it.")
target.close()
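# A minimal alternative sketch (not part of the exercise): the same writes can
# be wrapped in a "with" block so the file is closed automatically even if an
# exception occurs. The helper below is illustrative only and is never called.
def write_lines(path, lines):
    # open for writing (truncating any existing content); closed on block exit
    with open(path, 'w') as out:
        for line in lines:
            out.write(line + "\n")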
|