#!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import copy
import time
import unittest
import command
import config
import mle
import node
DUT_LEADER = 1
DUT_ROUTER1 = 2
class Cert_5_5_1_LeaderReboot(unittest.TestCase):
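"""
Thread certification test 5.5.1 (Leader reboot): build a two-node topology
(Leader + Router), reset the Leader for less than the leader timeout, and
verify that it resumes the Leader role with its original RLOC16 by exchanging
multicast Link Request / Link Accept messages with the Router, without
sending a Parent Request, then confirm connectivity with a link-local ping.
"""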
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,3):
self.nodes[i] = node.Node(i, simulator = self.simulator)
self.nodes[DUT_LEADER].set_panid(0xface)
self.nodes[DUT_LEADER].set_mode('rsdn')
self._setUpLeader()
self.nodes[DUT_ROUTER1].set_panid(0xface)
self.nodes[DUT_ROUTER1].set_mode('rsdn')
self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[DUT_LEADER].get_addr64())
self.nodes[DUT_ROUTER1].enable_whitelist()
self.nodes[DUT_ROUTER1].set_router_selection_jitter(1)
def _setUpLeader(self):
self.nodes[DUT_LEADER].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64())
self.nodes[DUT_LEADER].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
del self.simulator
def test(self):
# 1 ALL: Build and verify the topology
self.nodes[DUT_LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[DUT_LEADER].get_state(), 'leader')
self.nodes[DUT_ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[DUT_ROUTER1].get_state(), 'router')
# 2 DUT_LEADER, DUT_ROUTER1: Verify both DUT_LEADER and DUT_ROUTER1 send MLE Advertisement message
leader_messages = self.simulator.get_messages_sent_by(DUT_LEADER)
msg = leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
command.check_mle_advertisement(msg)
router1_messages = self.simulator.get_messages_sent_by(DUT_ROUTER1)
msg = router1_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
command.check_mle_advertisement(msg)
# Send a harness helper ping to the DUT
router1_rloc = self.nodes[DUT_ROUTER1].get_ip6_address(config.ADDRESS_TYPE.RLOC)
self.assertTrue(self.nodes[DUT_LEADER].ping(router1_rloc))
leader_rloc = self.nodes[DUT_LEADER].get_ip6_address(config.ADDRESS_TYPE.RLOC)
self.assertTrue(self.nodes[DUT_ROUTER1].ping(leader_rloc))
# 3 DUT_LEADER: Reset DUT_LEADER
leader_rloc16 = self.nodes[DUT_LEADER].get_addr16()
self.nodes[DUT_LEADER].reset()
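# the reset clears the DUT's runtime configuration, so the whitelist is
# re-applied before the node is started again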
self._setUpLeader()
# Clean sniffer's buffer
self.simulator.get_messages_sent_by(DUT_LEADER)
self.simulator.get_messages_sent_by(DUT_ROUTER1)
# DUT_LEADER sleep time is less than leader timeout value
self.simulator.go(config.MAX_ADVERTISEMENT_INTERVAL)
# Verify DUT_LEADER didn't send MLE Advertisement messages
leader_messages = self.simulator.get_messages_sent_by(DUT_LEADER)
msg = leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT, False)
self.assertTrue(msg is None)
self.nodes[DUT_LEADER].start()
# Verify the DUT_LEADER is still a leader
self.simulator.go(5)
self.assertEqual(self.nodes[DUT_LEADER].get_state(), 'leader')
self.assertEqual(self.nodes[DUT_LEADER].get_addr16(), leader_rloc16)
# 4 DUT_LEADER: Verify DUT_LEADER sent a multicast Link Request message
leader_messages = self.simulator.get_messages_sent_by(DUT_LEADER)
leader_messages_temp = copy.deepcopy(leader_messages)
msg = leader_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
command.check_link_request(msg, tlv_request_address16 = command.CheckType.CONTAIN, \
tlv_request_route64 = command.CheckType.CONTAIN)
# 5 DUT_ROUTER1: Verify DUT_ROUTER1 replied with Link Accept message
router1_messages = self.simulator.get_messages_sent_by(DUT_ROUTER1)
router1_messages_temp = copy.deepcopy(router1_messages)
msg = router1_messages.next_mle_message(mle.CommandType.LINK_ACCEPT)
if msg is not None:
command.check_link_accept(msg, self.nodes[DUT_LEADER], address16 = command.CheckType.CONTAIN, \
leader_data = command.CheckType.CONTAIN, route64 = command.CheckType.CONTAIN)
else:
msg = router1_messages_temp.next_mle_message(mle.CommandType.LINK_ACCEPT_AND_REQUEST)
self.assertTrue(msg is not None)
command.check_link_accept(msg, self.nodes[DUT_LEADER], address16 = command.CheckType.CONTAIN, \
leader_data = command.CheckType.CONTAIN, route64 = command.CheckType.CONTAIN, \
challenge = command.CheckType.CONTAIN)
# 6 DUT_LEADER: Verify DUT_LEADER didn't send a Parent Request message
msg = leader_messages_temp.next_mle_message(mle.CommandType.PARENT_REQUEST, False)
self.assertTrue(msg is None)
# 7 ALL: Verify connectivity by sending an ICMPv6 Echo Request from DUT_LEADER to DUT_ROUTER1 link local address
router1_link_local_address = self.nodes[DUT_ROUTER1].get_ip6_address(config.ADDRESS_TYPE.LINK_LOCAL)
self.assertTrue(self.nodes[DUT_LEADER].ping(router1_link_local_address))
if __name__ == '__main__':
unittest.main()
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring  # Python 2
except NameError:
basestring = str  # Python 3
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bybit(Exchange):
def describe(self):
return self.deep_extend(super(bybit, self).describe(), {
'id': 'bybit',
'name': 'Bybit',
'countries': ['VG'], # British Virgin Islands
'version': 'v2',
'userAgent': None,
'rateLimit': 100,
'hostname': 'bybit.com', # bybit.com, bytick.com
'has': {
'margin': False,
'swap': True,
'future': True,
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchDeposits': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': False,
'fetchIndexOHLCV': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': None,
'fetchWithdrawals': True,
'setLeverage': True,
'setMarginMode': True,
},
'timeframes': {
'1m': '1',
'3m': '3',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': 'D',
'1w': 'W',
'1M': 'M',
'1y': 'Y',
},
'urls': {
'test': {
'spot': 'https://api-testnet.{hostname}',
'futures': 'https://api-testnet.{hostname}',
'v2': 'https://api-testnet.{hostname}',
'public': 'https://api-testnet.{hostname}',
'private': 'https://api-testnet.{hostname}',
},
'logo': 'https://user-images.githubusercontent.com/51840849/76547799-daff5b80-649e-11ea-87fb-3be9bac08954.jpg',
'api': {
'spot': 'https://api.{hostname}',
'futures': 'https://api.{hostname}',
'v2': 'https://api.{hostname}',
'public': 'https://api.{hostname}',
'private': 'https://api.{hostname}',
},
'www': 'https://www.bybit.com',
'doc': [
'https://bybit-exchange.github.io/docs/inverse/',
'https://bybit-exchange.github.io/docs/linear/',
'https://github.com/bybit-exchange',
],
'fees': 'https://help.bybit.com/hc/en-us/articles/360039261154',
'referral': 'https://www.bybit.com/app/register?ref=X7Prm',
},
'api': {
'spot': {
'public': {
'get': [
'symbols',
],
},
'quote': {
'get': [
'depth',
'depth/merged',
'trades',
'kline',
'ticker/24hr',
'ticker/price',
'ticker/book_ticker',
],
},
'private': {
'get': [
'order',
'open-orders',
'history-orders',
'myTrades',
'account',
'time',
],
'post': [
'order',
],
'delete': [
'order',
'order/fast',
],
},
'order': {
'delete': [
'batch-cancel',
'batch-fast-cancel',
'batch-cancel-by-ids',
],
},
},
'futures': {
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'execution/list',
'trade/closed-pnl/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
'v2': {
'public': {
'get': [
'orderBook/L2',
'kline/list',
'tickers',
'trading-records',
'symbols',
'liq-records',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'open-interest',
'big-deal',
'account-ratio',
'time',
'announcement',
'funding/prev-funding-rate',
'risk-limit/list',
],
},
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'execution/list',
'trade/closed-pnl/list',
'funding/prev-funding-rate',
'funding/prev-funding',
'funding/predicted-funding',
'account/api-key',
'account/lcp',
'wallet/balance',
'wallet/fund/records',
'wallet/withdraw/list',
'exchange-order/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
'public': {
'linear': {
'get': [
'kline',
'recent-trading-records',
'funding/prev-funding-rate',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'risk-limit',
],
},
},
'private': {
'linear': {
'get': [
'order/list',
'order/search',
'stop-order/list',
'stop-order/search',
'position/list',
'trade/execution/list',
'trade/closed-pnl/list',
'funding/predicted-funding',
'funding/prev-funding',
],
'post': [
'order/create',
'order/cancel',
'order/cancel-all',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancel-all',
'stop-order/replace',
'position/set-auto-add-margin',
'position/switch-isolated',
'tpsl/switch-mode',
'position/add-margin',
'position/set-leverage',
'position/trading-stop',
'position/set-risk',
],
},
},
},
'httpExceptions': {
'403': RateLimitExceeded, # Forbidden -- You request too many times
},
'exceptions': {
'exact': {
'-2015': AuthenticationError, # Invalid API-key, IP, or permissions for action.
'10001': BadRequest, # parameter error
'10002': InvalidNonce, # request expired, check your timestamp and recv_window
'10003': AuthenticationError, # Invalid apikey
'10004': AuthenticationError, # invalid sign
'10005': PermissionDenied, # permission denied for current apikey
'10006': RateLimitExceeded, # too many requests
'10007': AuthenticationError, # api_key not found in your request parameters
'10010': PermissionDenied, # request ip mismatch
'10017': BadRequest, # request path not found or request method is invalid
'10018': RateLimitExceeded, # exceed ip rate limit
'20001': OrderNotFound, # Order not exists
'20003': InvalidOrder, # missing parameter side
'20004': InvalidOrder, # invalid parameter side
'20005': InvalidOrder, # missing parameter symbol
'20006': InvalidOrder, # invalid parameter symbol
'20007': InvalidOrder, # missing parameter order_type
'20008': InvalidOrder, # invalid parameter order_type
'20009': InvalidOrder, # missing parameter qty
'20010': InvalidOrder, # qty must be greater than 0
'20011': InvalidOrder, # qty must be an integer
'20012': InvalidOrder, # qty must be greater than zero and less than 1 million
'20013': InvalidOrder, # missing parameter price
'20014': InvalidOrder, # price must be greater than 0
'20015': InvalidOrder, # missing parameter time_in_force
'20016': InvalidOrder, # invalid value for parameter time_in_force
'20017': InvalidOrder, # missing parameter order_id
'20018': InvalidOrder, # invalid date format
'20019': InvalidOrder, # missing parameter stop_px
'20020': InvalidOrder, # missing parameter base_price
'20021': InvalidOrder, # missing parameter stop_order_id
'20022': BadRequest, # missing parameter leverage
'20023': BadRequest, # leverage must be a number
'20031': BadRequest, # leverage must be greater than zero
'20070': BadRequest, # missing parameter margin
'20071': BadRequest, # margin must be greater than zero
'20084': BadRequest, # order_id or order_link_id is required
'30001': BadRequest, # order_link_id is repeated
'30003': InvalidOrder, # qty must be more than the minimum allowed
'30004': InvalidOrder, # qty must be less than the maximum allowed
'30005': InvalidOrder, # price exceeds maximum allowed
'30007': InvalidOrder, # price exceeds minimum allowed
'30008': InvalidOrder, # invalid order_type
'30009': ExchangeError, # no position found
'30010': InsufficientFunds, # insufficient wallet balance
'30011': PermissionDenied, # operation not allowed as position is undergoing liquidation
'30012': PermissionDenied, # operation not allowed as position is undergoing ADL
'30013': PermissionDenied, # position is in liq or adl status
'30014': InvalidOrder, # invalid closing order, qty should not greater than size
'30015': InvalidOrder, # invalid closing order, side should be opposite
'30016': ExchangeError, # TS and SL must be cancelled first while closing position
'30017': InvalidOrder, # estimated fill price cannot be lower than current Buy liq_price
'30018': InvalidOrder, # estimated fill price cannot be higher than current Sell liq_price
'30019': InvalidOrder, # cannot attach TP/SL params for non-zero position when placing non-opening position order
'30020': InvalidOrder, # position already has TP/SL params
'30021': InvalidOrder, # cannot afford estimated position_margin
'30022': InvalidOrder, # estimated buy liq_price cannot be higher than current mark_price
'30023': InvalidOrder, # estimated sell liq_price cannot be lower than current mark_price
'30024': InvalidOrder, # cannot set TP/SL/TS for zero-position
'30025': InvalidOrder,  # trigger price should be bigger than 10% of last price
'30026': InvalidOrder, # price too high
'30027': InvalidOrder, # price set for Take profit should be higher than Last Traded Price
'30028': InvalidOrder, # price set for Stop loss should be between Liquidation price and Last Traded Price
'30029': InvalidOrder, # price set for Stop loss should be between Last Traded Price and Liquidation price
'30030': InvalidOrder, # price set for Take profit should be lower than Last Traded Price
'30031': InsufficientFunds, # insufficient available balance for order cost
'30032': InvalidOrder, # order has been filled or cancelled
'30033': RateLimitExceeded, # The number of stop orders exceeds maximum limit allowed
'30034': OrderNotFound, # no order found
'30035': RateLimitExceeded, # too fast to cancel
'30036': ExchangeError, # the expected position value after order execution exceeds the current risk limit
'30037': InvalidOrder, # order already cancelled
'30041': ExchangeError, # no position found
'30042': InsufficientFunds, # insufficient wallet balance
'30043': InvalidOrder, # operation not allowed as position is undergoing liquidation
'30044': InvalidOrder,  # operation not allowed as position is undergoing ADL
'30045': InvalidOrder, # operation not allowed as position is not normal status
'30049': InsufficientFunds, # insufficient available balance
'30050': ExchangeError, # any adjustments made will trigger immediate liquidation
'30051': ExchangeError, # due to risk limit, cannot adjust leverage
'30052': ExchangeError, # leverage can not less than 1
'30054': ExchangeError, # position margin is invalid
'30057': ExchangeError, # requested quantity of contracts exceeds risk limit
'30063': ExchangeError, # reduce-only rule not satisfied
'30067': InsufficientFunds, # insufficient available balance
'30068': ExchangeError, # exit value must be positive
'30074': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice、 MarkPrice, determined by trigger_by) is raising to stop_px, but the LastPrice(or IndexPrice、 MarkPrice) is already equal to or greater than stop_px, please adjust base_price or stop_px
'30075': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice、 MarkPrice, determined by trigger_by) is falling to stop_px, but the LastPrice(or IndexPrice、 MarkPrice) is already equal to or less than stop_px, please adjust base_price or stop_px
'33004': AuthenticationError, # apikey already expired
'34026': ExchangeError, # the limit is no change
},
'broad': {
'unknown orderInfo': OrderNotFound, # {"ret_code":-1,"ret_msg":"unknown orderInfo","ext_code":"","ext_info":"","result":null,"time_now":"1584030414.005545","rate_limit_status":99,"rate_limit_reset_ms":1584030414003,"rate_limit":100}
'invalid api_key': AuthenticationError, # {"ret_code":10003,"ret_msg":"invalid api_key","ext_code":"","ext_info":"","result":null,"time_now":"1599547085.415797"}
},
},
'precisionMode': TICK_SIZE,
'options': {
'marketTypes': {
'BTC/USDT': 'linear',
'ETH/USDT': 'linear',
'BNB/USDT': 'linear',
'ADA/USDT': 'linear',
'DOGE/USDT': 'linear',
'XRP/USDT': 'linear',
'DOT/USDT': 'linear',
'UNI/USDT': 'linear',
'BCH/USDT': 'linear',
'LTC/USDT': 'linear',
'SOL/USDT': 'linear',
'LINK/USDT': 'linear',
'MATIC/USDT': 'linear',
'ETC/USDT': 'linear',
'FIL/USDT': 'linear',
'EOS/USDT': 'linear',
'AAVE/USDT': 'linear',
'XTZ/USDT': 'linear',
'SUSHI/USDT': 'linear',
'XEM/USDT': 'linear',
'BTC/USD': 'inverse',
'ETH/USD': 'inverse',
'EOS/USD': 'inverse',
'XRP/USD': 'inverse',
},
'defaultType': 'linear', # linear, inverse, futures
'code': 'BTC',
'cancelAllOrders': {
# 'method': 'v2PrivatePostOrderCancelAll', # v2PrivatePostStopOrderCancelAll
},
'recvWindow': 5 * 1000, # 5 sec default
'timeDifference': 0, # the difference between system clock and exchange server clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.00075,
'maker': -0.00025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
})
def nonce(self):
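# local clock in milliseconds, corrected by the offset measured in
# load_time_difference(), so the nonce tracks the exchange server's clock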
return self.milliseconds() - self.options['timeDifference']
def load_time_difference(self, params={}):
serverTime = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
def fetch_time(self, params={}):
response = self.v2PublicGetTime(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {},
# time_now: '1583933682.448826'
# }
#
return self.safe_timestamp(response, 'time_now')
def fetch_markets(self, params={}):
if self.options['adjustForTimeDifference']:
self.load_time_difference()
response = self.v2PublicGetSymbols(params)
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "name":"BTCUSD",
# "alias":"BTCUSD",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USD",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999.5","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":1000000,"min_trading_qty":1,"qty_step":1}
# },
# {
# "name":"BTCUSDT",
# "alias":"BTCUSDT",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USDT",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999.5","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":100,"min_trading_qty":0.001,"qty_step":0.001}
# },
# ],
# "time_now":"1610539664.818033"
# }
#
markets = self.safe_value(response, 'result', [])
options = self.safe_value(self.options, 'fetchMarkets', {})
linearQuoteCurrencies = self.safe_value(options, 'linear', {'USDT': True})
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string_2(market, 'name', 'symbol')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
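# markets quoted in USDT are classified as linear, everything else as inverse;
# ids that are not simply base+quote are treated as dated futures below,
# keeping the exchange id as the symbol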
linear = (quote in linearQuoteCurrencies)
inverse = not linear
symbol = base + '/' + quote
baseQuote = base + quote
type = 'swap'
if baseQuote != id:
symbol = id
type = 'futures'
lotSizeFilter = self.safe_value(market, 'lot_size_filter', {})
priceFilter = self.safe_value(market, 'price_filter', {})
precision = {
'amount': self.safe_number(lotSizeFilter, 'qty_step'),
'price': self.safe_number(priceFilter, 'tick_size'),
}
leverage = self.safe_value(market, 'leverage_filter', {})
status = self.safe_string(market, 'status')
active = None
if status is not None:
active = (status == 'Trading')
spot = (type == 'spot')
swap = (type == 'swap')
futures = (type == 'futures')
option = (type == 'option')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': active,
'precision': precision,
'taker': self.safe_number(market, 'taker_fee'),
'maker': self.safe_number(market, 'maker_fee'),
'type': type,
'spot': spot,
'swap': swap,
'futures': futures,
'option': option,
'linear': linear,
'inverse': inverse,
'limits': {
'amount': {
'min': self.safe_number(lotSizeFilter, 'min_trading_qty'),
'max': self.safe_number(lotSizeFilter, 'max_trading_qty'),
},
'price': {
'min': self.safe_number(priceFilter, 'min_price'),
'max': self.safe_number(priceFilter, 'max_price'),
},
'cost': {
'min': None,
'max': None,
},
'leverage': {
'max': self.safe_number(leverage, 'max_leverage', 1),
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'last_price')
open = self.safe_number(ticker, 'prev_price_24h')
percentage = self.safe_number(ticker, 'price_24h_pcnt')
if percentage is not None:
percentage *= 100
change = None
average = None
if (last is not None) and (open is not None):
change = last - open
average = self.sum(open, last) / 2
baseVolume = self.safe_number(ticker, 'turnover_24h')
quoteVolume = self.safe_number(ticker, 'volume_24h')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high_price_24h'),
'low': self.safe_number(ticker, 'low_price_24h'),
'bid': self.safe_number(ticker, 'bid_price'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask_price'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v2PublicGetTickers(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
first = self.safe_value(result, 0)
timestamp = self.safe_timestamp(response, 'time_now')
ticker = self.parse_ticker(first, market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.v2PublicGetTickers(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
tickers = {}
for i in range(0, len(result)):
ticker = self.parse_ticker(result[i])
symbol = ticker['symbol']
tickers[symbol] = ticker
return self.filter_by_array(tickers, 'symbol', symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# inverse perpetual BTC/USD
#
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# }
#
# linear perpetual BTC/USDT
#
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
#
return [
self.safe_timestamp_2(ohlcv, 'open_time', 'start_at'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number_2(ohlcv, 'volume', 'turnover'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.seconds()
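# the kline endpoints need an explicit start time: derive it from 'since',
# or count 'limit' candles back from now when only a limit is given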
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument or a limit argument')
else:
request['from'] = now - limit * duration
else:
request['from'] = int(since / 1000)
if limit is not None:
request['limit'] = limit # max 200, default 200
method = 'v2PublicGetKlineList'
if price == 'mark':
method = 'v2PublicGetMarkPriceKline'
elif price == 'index':
method = 'v2PublicGetIndexPriceKline'
elif price == 'premiumIndex':
method = 'v2PublicGetPremiumIndexKline'
elif market['linear']:
method = 'publicLinearGetKline'
response = getattr(self, method)(self.extend(request, params))
#
# inverse perpetual BTC/USD
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# },
# ],
# time_now: '1583953082.397330'
# }
#
# linear perpetual BTC/USDT
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
# ],
# "time_now":"1587884120.168077"
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = 'publicLinearGetFundingPrevFundingRate' if market['linear'] else 'v2PublicGetFundingPrevFundingRate'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "symbol": "BTCUSD",
# "funding_rate": "0.00010000",
# "funding_rate_timestamp": 1577433600
# },
# "ext_info": null,
# "time_now": "1577445586.446797",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577445586454,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result')
nextFundingRate = self.safe_number(result, 'funding_rate')
previousFundingTime = self.safe_integer(result, 'funding_rate_timestamp') * 1000
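# funding on these contracts settles every 8 hours, so the next funding time
# is derived from the previous funding timestamp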
nextFundingTime = previousFundingTime + (8 * 3600000)
currentTime = self.milliseconds()
return {
'info': result,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': currentTime,
'datetime': self.iso8601(currentTime),
'previousFundingRate': None,
'nextFundingRate': nextFundingRate,
'previousFundingTimestamp': previousFundingTime,
'nextFundingTimestamp': nextFundingTime,
'previousFundingDatetime': self.iso8601(previousFundingTime),
'nextFundingDatetime': self.iso8601(nextFundingTime),
}
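# the three OHLCV variants below reuse fetch_ohlcv and select the kline
# endpoint through the 'price' parameter ('index', 'mark' or 'premiumIndex')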
def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'index',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchMarkOHLCV() requires a since argument or a limit argument')
request = {
'price': 'mark',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_premium_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchPremiumIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'premiumIndex',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id": "44275042152",
# "symbol": "AAVEUSDT",
# "price": "256.35",
# "qty": "0.1",
# "side": "Buy",
# "time": "2021-11-30T12:46:14.000Z",
# "trade_time_ms": "1638276374312"
# }
#
# fetchMyTrades, fetchOrderTrades(private)
#
# {
# "order_id": "b020b4bc-6fe2-45b5-adbc-dd07794f9746",
# "order_link_id": "",
# "side": "Buy",
# "symbol": "AAVEUSDT",
# "exec_id": "09abe8f0-aea6-514e-942b-7da8cb935120",
# "price": "269.3",
# "order_price": "269.3",
# "order_qty": "0.1",
# "order_type": "Market",
# "fee_rate": "0.00075",
# "exec_price": "256.35",
# "exec_type": "Trade",
# "exec_qty": "0.1",
# "exec_fee": "0.01922625",
# "exec_value": "25.635",
# "leaves_qty": "0",
# "closed_size": "0",
# "last_liquidity_ind": "RemovedLiquidity",
# "trade_time": "1638276374",
# "trade_time_ms": "1638276374312"
# }
#
id = self.safe_string_2(trade, 'id', 'exec_id')
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
amountString = self.safe_string_2(trade, 'qty', 'exec_qty')
priceString = self.safe_string_2(trade, 'exec_price', 'price')
costString = self.safe_string(trade, 'exec_value')
timestamp = self.parse8601(self.safe_string(trade, 'time'))
if timestamp is None:
timestamp = self.safe_integer(trade, 'trade_time_ms')
side = self.safe_string_lower(trade, 'side')
lastLiquidityInd = self.safe_string(trade, 'last_liquidity_ind')
takerOrMaker = 'maker' if (lastLiquidityInd == 'AddedLiquidity') else 'taker'
feeCostString = self.safe_string(trade, 'exec_fee')
fee = None
if feeCostString is not None:
feeCurrencyCode = market['base'] if market['inverse'] else market['quote']
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
'rate': self.safe_string(trade, 'fee_rate'),
}
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': self.safe_string(trade, 'order_id'),
'type': self.safe_string_lower(trade, 'order_type'),
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'from': 123, # from id
}
if limit is not None:
request['count'] = limit # default 500, max 1000
method = 'publicLinearGetRecentTradingRecords' if market['linear'] else 'v2PublicGetTradingRecords'
response = getattr(self, method)(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# id: 43785688,
# symbol: 'BTCUSD',
# price: 7786,
# qty: 67,
# side: 'Sell',
# time: '2020-03-11T19:18:30.123Z'
# },
# ],
# time_now: '1583954313.393362'
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_trades(result, market, since, limit)
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='Buy', asksKey='Sell', priceKey='price', amountKey='size'):
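# bybit returns the book as a flat list of {symbol, price, size, side} entries,
# so split it into bids and asks by the 'side' field before sorting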
bids = []
asks = []
for i in range(0, len(orderbook)):
bidask = orderbook[i]
side = self.safe_string(bidask, 'side')
if side == 'Buy':
bids.append(self.parse_bid_ask(bidask, priceKey, amountKey))
elif side == 'Sell':
asks.append(self.parse_bid_ask(bidask, priceKey, amountKey))
else:
raise ExchangeError(self.id + ' parseOrderBook encountered an unrecognized bidask format: ' + self.json(bidask))
return {
'symbol': symbol,
'bids': self.sort_by(bids, 0, True),
'asks': self.sort_by(asks, 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v2PublicGetOrderBookL2(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {symbol: 'BTCUSD', price: '7767.5', size: 677956, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7767', size: 580690, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7766.5', size: 475252, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7768', size: 330847, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7768.5', size: 97159, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7769', size: 6508, side: 'Sell'},
# ],
# time_now: '1583954829.874823'
# }
#
result = self.safe_value(response, 'result', [])
timestamp = self.safe_timestamp(response, 'time_now')
return self.parse_order_book(result, symbol, timestamp, 'Buy', 'Sell', 'price', 'size')
def fetch_balance(self, params={}):
# note: any funds in the 'spot' account will not be returned or visible from this endpoint
self.load_markets()
request = {}
coin = self.safe_string(params, 'coin')
code = self.safe_string(params, 'code')
if coin is not None:
request['coin'] = coin
elif code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
response = self.v2PrivateGetWalletBalance(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {
# BTC: {
# equity: 0,
# available_balance: 0,
# used_margin: 0,
# order_margin: 0,
# position_margin: 0,
# occ_closing_fee: 0,
# occ_funding_fee: 0,
# wallet_balance: 0,
# realised_pnl: 0,
# unrealised_pnl: 0,
# cum_realised_pnl: 0,
# given_cash: 0,
# service_cash: 0
# }
# },
# time_now: '1583937810.370020',
# rate_limit_status: 119,
# rate_limit_reset_ms: 1583937810367,
# rate_limit: 120
# }
#
result = {
'info': response,
}
balances = self.safe_value(response, 'result', {})
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = balances[currencyId]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available_balance')
account['used'] = self.safe_string(balance, 'used_margin')
account['total'] = self.safe_string(balance, 'equity')
result[code] = account
return self.parse_balance(result)
def parse_order_status(self, status):
statuses = {
# basic orders
'Created': 'open',
'Rejected': 'rejected', # order is triggered but failed upon being placed
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Cancelled': 'canceled',
'PendingCancel': 'canceling', # the engine has received the cancellation but there is no guarantee that it will be successful
# conditional orders
'Active': 'open', # order is triggered and placed successfully
'Untriggered': 'open', # order waits to be triggered
'Triggered': 'closed', # order is triggered
# 'Cancelled': 'canceled', # order is cancelled
# 'Rejected': 'rejected', # order is triggered but fail to be placed
'Deactivated': 'canceled', # conditional order was cancelled before triggering
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'GoodTillCancel': 'GTC',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
'PostOnly': 'PO',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0, # in contracts, where 1 contract = 1 quote currency unit(USD for inverse contracts)
# "cum_exec_value": 0, # in contract's underlying currency(BTC for inverse contracts)
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# }
#
# fetchOrder
#
# {
# "user_id" : 599946,
# "symbol" : "BTCUSD",
# "side" : "Buy",
# "order_type" : "Limit",
# "price" : "7948",
# "qty" : 10,
# "time_in_force" : "GoodTillCancel",
# "order_status" : "Filled",
# "ext_fields" : {
# "o_req_num" : -1600687220498,
# "xreq_type" : "x_create"
# },
# "last_exec_time" : "1588150113.968422",
# "last_exec_price" : "7948",
# "leaves_qty" : 0,
# "leaves_value" : "0",
# "cum_exec_qty" : 10,
# "cum_exec_value" : "0.00125817",
# "cum_exec_fee" : "-0.00000031",
# "reject_reason" : "",
# "cancel_type" : "",
# "order_link_id" : "",
# "created_at" : "2020-04-29T08:45:24.399146Z",
# "updated_at" : "2020-04-29T08:48:33.968422Z",
# "order_id" : "dd2504b9-0157-406a-99e1-efa522373944"
# }
#
# conditional order
#
# {
# "user_id":##,
# "symbol":"BTCUSD",
# "side":"Buy",
# "order_type":"Market",
# "price":0,
# "qty":10,
# "time_in_force":"GoodTillCancel",
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "order_status":"Untriggered",
# "ext_fields":{
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "expected_direction":"Rising",
# "trigger_price":12400,
# "close_on_trigger":true,
# "op_from":"api",
# "remark":"x.x.x.x",
# "o_req_num":0
# },
# "leaves_qty":10,
# "leaves_value":0.00080645,
# "reject_reason":null,
# "cross_seq":-1,
# "created_at":"2020-08-21T09:18:48.000Z",
# "updated_at":"2020-08-21T09:18:48.000Z",
# "trigger_price":12400,
# "stop_order_id":"3f3b54b1-3379-42c7-8510-44f4d9915be0"
# }
#
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
feeCurrency = None
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
id = self.safe_string_2(order, 'order_id', 'stop_order_id')
type = self.safe_string_lower(order, 'order_type')
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'average_price')
amount = self.safe_string(order, 'qty')
cost = self.safe_string(order, 'cum_exec_value')
filled = self.safe_string(order, 'cum_exec_qty')
remaining = self.safe_string(order, 'leaves_qty')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
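# fees for linear (USDT) contracts are denominated in the quote currency,
# for inverse contracts in the base currency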
if market is not None:
if marketType == 'linear':
feeCurrency = market['quote']
else:
feeCurrency = market['base']
lastTradeTimestamp = self.safe_timestamp(order, 'last_exec_time')
if lastTradeTimestamp == 0:
lastTradeTimestamp = None
status = self.parse_order_status(self.safe_string_2(order, 'order_status', 'stop_order_status'))
side = self.safe_string_lower(order, 'side')
feeCostString = Precise.string_abs(self.safe_string(order, 'cum_exec_fee'))
fee = None
if feeCostString is not None:
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'order_link_id')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
timeInForce = self.parse_time_in_force(self.safe_string(order, 'time_in_force'))
stopPrice = self.safe_number_2(order, 'trigger_price', 'stop_px')
postOnly = (timeInForce == 'PO')
return self.safe_order2({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearGetOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetOrder'
elif market['futures']:
method = 'futuresPrivateGetOrder'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearGetStopOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetStopOrder'
elif market['futures']:
method = 'futuresPrivateGetStopOrder'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Limit",
# "price": "8083",
# "qty": 10,
# "time_in_force": "GoodTillCancel",
# "order_status": "New",
# "ext_fields": {"o_req_num": -308787, "xreq_type": "x_create", "xreq_offset": 4154640},
# "leaves_qty": 10,
# "leaves_value": "0.00123716",
# "cum_exec_qty": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-10-21T07:28:19.396246Z",
# "updated_at": "2019-10-21T07:28:19.396246Z",
# "order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"
# },
# "time_now": "1571651135.291930",
# "rate_limit_status": 99, # The remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": "8000",
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Untriggered",
# "ext_fields": {},
# "leaves_qty": 1,
# "leaves_value": "0.00013333",
# "cum_exec_qty": 0,
# "cum_exec_value": null,
# "cum_exec_fee": null,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-12-27T19:56:24.052194Z",
# "updated_at": "2019-12-27T19:56:24.052194Z",
# "order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"
# },
# "time_now": "1577476584.386958",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
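# inverse contracts are sized in whole contracts (1 contract = 1 USD), so the
# quantity must be an integer; linear contracts take a fractional base amount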
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request = {
# orders ---------------------------------------------------------
'side': self.capitalize(side),
'symbol': market['id'],
'order_type': self.capitalize(type),
'qty': qty, # order quantity in USD, integer only
# 'price': float(self.price_to_precision(symbol, price)), # required for limit orders
'time_in_force': 'GoodTillCancel', # ImmediateOrCancel, FillOrKill, PostOnly
# 'take_profit': 123.45, # take profit price, only take effect upon opening the position
# 'stop_loss': 123.45, # stop loss price, only take effect upon opening the position
# 'reduce_only': False, # reduce only, required for linear orders
# when creating a closing order, bybit recommends a True value for
# close_on_trigger to avoid failing due to insufficient available margin
# 'close_on_trigger': False, required for linear orders
# 'order_link_id': 'string', # unique client order id, max 36 characters
# conditional orders ---------------------------------------------
# base_price is used to compare with the value of stop_px, to decide
# whether your conditional order will be triggered by crossing trigger
# price from upper side or lower side, mainly used to identify the
# expected direction of the current conditional order
# 'base_price': 123.45, # required for conditional orders
# 'stop_px': 123.45, # trigger price, required for conditional orders
# 'trigger_by': 'LastPrice', # IndexPrice, MarkPrice
}
priceIsRequired = False
if type == 'limit':
priceIsRequired = True
if priceIsRequired:
if price is not None:
request['price'] = float(self.price_to_precision(symbol, price))
else:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for a ' + type + ' order')
clientOrderId = self.safe_string_2(params, 'order_link_id', 'clientOrderId')
if clientOrderId is not None:
request['order_link_id'] = clientOrderId
params = self.omit(params, ['order_link_id', 'clientOrderId'])
stopPx = self.safe_value_2(params, 'stop_px', 'stopPrice')
basePrice = self.safe_value(params, 'base_price')
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCreate'
request['reduce_only'] = False
request['close_on_trigger'] = False
elif market['inverse']:
method = 'v2PrivatePostOrderCreate'
elif market['futures']:
method = 'futuresPrivatePostOrderCreate'
if stopPx is not None:
if basePrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCreate'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCreate'
elif market['futures']:
method = 'futuresPrivatePostStopOrderCreate'
request['stop_px'] = float(self.price_to_precision(symbol, stopPx))
request['base_price'] = float(self.price_to_precision(symbol, basePrice))
params = self.omit(params, ['stop_px', 'stopPrice', 'base_price'])
elif basePrice is not None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0,
# "cum_exec_value": 0,
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# },
# "time_now": "1575111823.458705",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_status": "Untriggered",
# "ext_fields": {
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "expected_direction": "Rising",
# "trigger_price": 7500,
# "op_from": "api",
# "remark": "127.0.01",
# "o_req_num": 0
# },
# "leaves_qty": 1,
# "leaves_value": 0.00013333,
# "reject_reason": null,
# "cross_seq": -1,
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# "ext_info": null,
# "time_now": "1577450904.327654",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1577450904335,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' editOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
# 'order_id': id, # only for non-conditional orders
'symbol': market['id'],
# 'p_r_qty': self.amount_to_precision(symbol, amount), # new order quantity, optional
# 'p_r_price': self.price_to_precision(symbol, price), # new order price, optional
# ----------------------------------------------------------------
# conditional orders
# 'stop_order_id': id, # only for conditional orders
# 'p_r_trigger_price': 123.45, # new trigger price also known as stop_px
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostOrderReplace'
elif market['futures']:
method = 'futuresPrivatePostOrderReplace'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is not None:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostStopOrderReplace'
elif market['futures']:
method = 'futuresPrivatePostStopOrderReplace'
request['stop_order_id'] = stopOrderId
params = self.omit(params, ['stop_order_id'])
else:
request['order_id'] = id
if amount is not None:
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request['p_r_qty'] = qty
if price is not None:
request['p_r_price'] = float(self.price_to_precision(symbol, price))
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"},
# "time_now": "1539778407.210858",
# "rate_limit_status": 99, # remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"stop_order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"},
# "ext_info": null,
# "time_now": "1577475760.604942",
# "rate_limit_status": 96,
# "rate_limit_reset_ms": 1577475760612,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result', {})
return {
'info': response,
'id': self.safe_string_2(result, 'order_id', 'stop_order_id'),
'order_id': self.safe_string(result, 'order_id'),
'stop_order_id': self.safe_string(result, 'stop_order_id'),
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostOrderCancel'
elif market['futures']:
method = 'futuresPrivatePostOrderCancel'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCancel'
elif market['futures']:
method = 'futuresPrivatePostStopOrderCancel'
response = getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', {})
return self.parse_order(result, market)
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
options = self.safe_value(self.options, 'cancelAllOrders', {})
defaultMethod = None
if market['swap']:
if market['linear']:
defaultMethod = 'privateLinearPostOrderCancelAll'
elif market['inverse']:
defaultMethod = 'v2PrivatePostOrderCancelAll'
elif market['futures']:
defaultMethod = 'futuresPrivatePostOrderCancelAll'
method = self.safe_string(options, 'method', defaultMethod)
response = getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', [])
return self.parse_orders(result, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'order_id': 'string'
# 'order_link_id': 'string', # unique client order id, max 36 characters
# 'symbol': market['id'], # default BTCUSD
# 'order': 'desc', # asc
# 'page': 1,
# 'limit': 20, # max 50
# 'order_status': 'Created,New'
# conditional orders ---------------------------------------------
# 'stop_order_id': 'string',
# 'stop_order_status': 'Untriggered',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
options = self.safe_value(self.options, 'fetchOrders', {})
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
defaultMethod = None
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = (marketDefined and market['futures']) or (marketType == 'futures')
if linear:
defaultMethod = 'privateLinearGetOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetOrderList'
elif futures:
defaultMethod = 'futuresPrivateGetOrderList'
query = params
if ('stop_order_id' in params) or ('stop_order_status' in params):
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is not None:
if isinstance(stopOrderStatus, list):
stopOrderStatus = ','.join(stopOrderStatus)
request['stop_order_status'] = stopOrderStatus
query = self.omit(params, 'stop_order_status')
if linear:
defaultMethod = 'privateLinearGetStopOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetStopOrderList'
elif futures:
defaultMethod = 'futuresPrivateGetStopOrderList'
method = self.safe_string(options, 'method', defaultMethod)
response = getattr(self, method)(self.extend(request, query))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 6,
# "data": [
# {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Market",
# "price": 7074,
# "qty": 2,
# "time_in_force": "ImmediateOrCancel",
# "order_status": "Filled",
# "ext_fields": {
# "close_on_trigger": True,
# "orig_order_type": "BLimit",
# "prior_x_req_price": 5898.5,
# "op_from": "pc",
# "remark": "127.0.0.1",
# "o_req_num": -34799032763,
# "xreq_type": "x_create"
# },
# "last_exec_time": "1577448481.696421",
# "last_exec_price": 7070.5,
# "leaves_qty": 0,
# "leaves_value": 0,
# "cum_exec_qty": 2,
# "cum_exec_value": 0.00028283,
# "cum_exec_fee": 0.00002,
# "reject_reason": "NoError",
# "order_link_id": "",
# "created_at": "2019-12-27T12:08:01.000Z",
# "updated_at": "2019-12-27T12:08:01.000Z",
# "order_id": "f185806b-b801-40ff-adec-52289370ed62"
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577448922.437871",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 1,
# "data": [
# {
# "user_id": 1,
# "stop_order_status": "Untriggered",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_link_id": "",
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# ]
# },
# "ext_info": null,
# "time_now": "1577451658.755468",
# "rate_limit_status": 599,
# "rate_limit_reset_ms": 1577451658762,
# "rate_limit": 600
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_orders(data, market, since, limit)
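    # usage sketch (illustrative): passing 'stop_order_id' or 'stop_order_status' in params switches
    # fetch_orders to the conditional (stop) order endpoints; a list of statuses is sent comma-separated
    #
    #     exchange.fetch_orders('BTC/USD', None, 20, {'stop_order_status': ['Untriggered', 'Triggered']})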
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Rejected',
'Filled',
'Cancelled',
# conditional orders
# 'Active',
# 'Triggered',
# 'Cancelled',
# 'Rejected',
# 'Deactivated',
]
options = self.safe_value(self.options, 'fetchClosedOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Created',
'New',
'PartiallyFilled',
'PendingCancel',
# conditional orders
# 'Untriggered',
]
options = self.safe_value(self.options, 'fetchOpenOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
'order_id': id,
}
return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
request = {
# 'order_id': 'f185806b-b801-40ff-adec-52289370ed62', # if not provided will return user's trading records
# 'symbol': market['id'],
# 'start_time': int(since / 1000),
# 'page': 1,
            # 'limit': 20, # max 50
}
market = None
orderId = self.safe_string(params, 'order_id')
if orderId is not None:
request['order_id'] = orderId
params = self.omit(params, 'order_id')
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit # default 20, max 50
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = (marketDefined and market['futures']) or (marketType == 'futures')
method = None
if linear:
method = 'privateLinearGetTradeExecutionList'
elif inverse:
method = 'v2PrivateGetExecutionList'
elif futures:
method = 'futuresPrivateGetExecutionList'
response = getattr(self, method)(self.extend(request, params))
#
# inverse
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "order_id": "Abandonednot !", # Abandonednot !
# "trade_list": [
# {
# "closed_size": 0,
# "cross_seq": 277136382,
# "exec_fee": "0.0000001",
# "exec_id": "256e5ef8-abfe-5772-971b-f944e15e0d68",
# "exec_price": "8178.5",
# "exec_qty": 1,
# "exec_time": "1571676941.70682",
# "exec_type": "Trade", #Exec Type Enum
# "exec_value": "0.00012227",
# "fee_rate": "0.00075",
# "last_liquidity_ind": "RemovedLiquidity", #Liquidity Enum
# "leaves_qty": 0,
# "nth_fill": 2,
# "order_id": "7ad50cb1-9ad0-4f74-804b-d82a516e1029",
# "order_link_id": "",
# "order_price": "8178",
# "order_qty": 1,
# "order_type": "Market", #Order Type Enum
# "side": "Buy", #Side Enum
# "symbol": "BTCUSD", #Symbol Enum
# "user_id": 1
# }
# ]
# },
# "time_now": "1577483699.281488",
# "rate_limit_status": 118,
# "rate_limit_reset_ms": 1577483699244737,
# "rate_limit": 120
# }
#
# linear
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":{
# "current_page":1,
# "data":[
# {
# "order_id":"b59418ec-14d4-4ef9-b9f4-721d5d576974",
# "order_link_id":"",
# "side":"Sell",
# "symbol":"BTCUSDT",
# "exec_id":"0327284d-faec-5191-bd89-acc5b4fafda9",
# "price":0.5,
# "order_price":0.5,
# "order_qty":0.01,
# "order_type":"Market",
# "fee_rate":0.00075,
# "exec_price":9709.5,
# "exec_type":"Trade",
# "exec_qty":0.01,
# "exec_fee":0.07282125,
# "exec_value":97.095,
# "leaves_qty":0,
# "closed_size":0.01,
# "last_liquidity_ind":"RemovedLiquidity",
# "trade_time":1591648052,
# "trade_time_ms":1591648052861
# }
# ]
# },
# "time_now":"1591736501.979264",
# "rate_limit_status":119,
# "rate_limit_reset_ms":1591736501974,
# "rate_limit":120
# }
#
result = self.safe_value(response, 'result', {})
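        # inverse endpoints return the fills under 'trade_list', linear endpoints under 'data', see the samples above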
trades = self.safe_value_2(result, 'trade_list', 'data', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
'wallet_fund_type': 'Deposit', # Deposit, Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'deposit'})
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'status': 'Pending', # ToBeConfirmed, UnderReview, Pending, Success, CancelByUser, Reject, Expire
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = self.v2PrivateGetWalletWithdrawList(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# },
# ],
# "current_page": 1,
# "last_page": 1
# },
# "ext_info": null,
# "time_now": "1577482295.125488",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577482295132,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'withdrawal'})
def parse_transaction_status(self, status):
statuses = {
'ToBeConfirmed': 'pending',
'UnderReview': 'pending',
'Pending': 'pending',
'Success': 'ok',
'CancelByUser': 'canceled',
'Reject': 'rejected',
'Expire': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchWithdrawals
#
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# }
#
# fetchDeposits ledger entries
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.parse8601(self.safe_string_2(transaction, 'submited_at', 'exec_time'))
updated = self.parse8601(self.safe_string(transaction, 'updated_at'))
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
address = self.safe_string(transaction, 'address')
feeCost = self.safe_number(transaction, 'fee')
type = self.safe_string_lower(transaction, 'type')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'wallet_fund_type': 'Deposit', # Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_ledger(data, currency, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(item, 'coin')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'amount')
after = self.safe_number(item, 'wallet_balance')
direction = 'out' if (amount < 0) else 'in'
before = None
if after is not None and amount is not None:
difference = amount if (direction == 'out') else -amount
before = self.sum(after, difference)
timestamp = self.parse8601(self.safe_string(item, 'exec_time'))
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
id = self.safe_string(item, 'id')
referenceId = self.safe_string(item, 'tx_id')
return {
'id': id,
'currency': code,
'account': self.safe_string(item, 'wallet_id'),
'referenceAccount': None,
'referenceId': referenceId,
'status': None,
'amount': amount,
'before': before,
'after': after,
'fee': None,
'direction': direction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': type,
'info': item,
}
def parse_ledger_entry_type(self, type):
types = {
'Deposit': 'transaction',
'Withdraw': 'transaction',
'RealisedPNL': 'trade',
'Commission': 'fee',
'Refund': 'cashback',
'Prize': 'prize', # ?
'ExchangeOrderWithdraw': 'transaction',
'ExchangeOrderDeposit': 'transaction',
}
return self.safe_string(types, type, type)
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
request = {}
if isinstance(symbols, list):
length = len(symbols)
if length != 1:
raise ArgumentsRequired(self.id + ' fetchPositions takes an array with exactly one symbol')
request['symbol'] = self.market_id(symbols[0])
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
response = None
if type == 'linear':
response = self.privateLinearGetPositionList(self.extend(request, params))
elif type == 'inverse':
response = self.v2PrivateGetPositionList(self.extend(request, params))
elif type == 'inverseFuture':
response = self.futuresPrivateGetPositionList(self.extend(request, params))
if (isinstance(response, basestring)) and self.is_json_encoded_object(response):
response = json.loads(response)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [] or {} depending on the request
# }
#
return self.safe_value(response, 'result')
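    # usage sketch (illustrative): the symbols argument, if given, must contain exactly one symbol
    #
    #     exchange.fetch_positions(['BTC/USDT'])  # routed by the 'defaultType' option unless 'type' is passed in params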
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
type = self.safe_string(api, 0)
section = self.safe_string(api, 1)
if type == 'spot':
if section == 'public':
section = 'v1'
else:
section += '/v1'
url = self.implode_hostname(self.urls['api'][type])
request = '/' + type + '/' + section + '/' + path
if (type == 'spot') or (type == 'quote'):
if params:
request += '?' + self.rawencode(params)
elif section == 'public':
if params:
request += '?' + self.rawencode(params)
elif type == 'public':
if params:
request += '?' + self.rawencode(params)
else:
self.check_required_credentials()
timestamp = self.nonce()
query = self.extend(params, {
'api_key': self.apiKey,
'recv_window': self.options['recvWindow'],
'timestamp': timestamp,
})
sortedQuery = self.keysort(query)
auth = self.rawencode(sortedQuery)
signature = self.hmac(self.encode(auth), self.encode(self.secret))
if method == 'POST':
body = self.json(self.extend(query, {
'sign': signature,
}))
headers = {
'Content-Type': 'application/json',
}
else:
request += '?' + self.urlencode(sortedQuery) + '&sign=' + signature
url += request
return {'url': url, 'method': method, 'body': body, 'headers': headers}
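    # signing sketch (illustrative, a standalone equivalent of the private branch above; sha256/hex is
    # the default digest for self.hmac and rawencode is a plain key=value join of the sorted query):
    #
    #     import hashlib, hmac
    #     query = {'api_key': api_key, 'recv_window': 5000, 'symbol': 'BTCUSD', 'timestamp': timestamp}
    #     payload = '&'.join(['%s=%s' % (k, query[k]) for k in sorted(query)])
    #     signature = hmac.new(secret.encode(), payload.encode(), hashlib.sha256).hexdigest()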
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
#
# {
# ret_code: 10001,
# ret_msg: 'ReadMapCB: expect {or n, but found \u0000, error ' +
# 'found in #0 byte of ...||..., bigger context ' +
# '...||...',
# ext_code: '',
# ext_info: '',
# result: null,
# time_now: '1583934106.590436'
# }
#
errorCode = self.safe_string(response, 'ret_code')
if errorCode != '0':
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
def set_margin_mode(self, marginType, symbol=None, params={}):
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": null,
# "ext_info": null,
# "time_now": "1577477968.175013",
# "rate_limit_status": 74,
# "rate_limit_reset_ms": 1577477968183,
# "rate_limit": 75
# }
#
leverage = self.safe_value(params, 'leverage')
if leverage is None:
            raise ArgumentsRequired(self.id + ' setMarginMode() requires a leverage parameter')
marginType = marginType.upper()
if (marginType != 'ISOLATED') and (marginType != 'CROSSED'):
raise BadRequest(self.id + ' marginType must be either isolated or crossed')
self.load_markets()
market = self.market(symbol)
method = None
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = market['futures'] or (marketType == 'futures')
if linear:
method = 'privateLinearPostPositionSwitchIsolated'
elif inverse:
method = 'v2PrivatePostPositionSwitchIsolated'
elif futures:
method = 'privateFuturesPostPositionSwitchIsolated'
isIsolated = (marginType == 'ISOLATED')
request = {
'symbol': market['id'],
'is_isolated': isIsolated,
'buy_leverage': leverage,
'sell_leverage': leverage,
}
return getattr(self, method)(self.extend(request, params))
def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = market['futures'] or (marketType == 'futures')
method = None
if linear:
method = 'privateLinearPostPositionSetLeverage'
elif inverse:
method = 'v2PrivatePostPositionLeverageSave'
elif futures:
method = 'privateFuturesPostPositionLeverageSave'
buy_leverage = leverage
sell_leverage = leverage
        # use membership checks to avoid a KeyError when the optional overrides are not passed in params
        if ('buy_leverage' in params) and ('sell_leverage' in params) and linear:
            buy_leverage = params['buy_leverage']
            sell_leverage = params['sell_leverage']
elif not leverage:
if linear:
raise ArgumentsRequired(self.id + ' setLeverage() requires either the parameter leverage or params["buy_leverage"] and params["sell_leverage"] for linear contracts')
else:
raise ArgumentsRequired(self.id + ' setLeverage() requires parameter leverage for inverse and futures contracts')
if (buy_leverage < 1) or (buy_leverage > 100) or (sell_leverage < 1) or (sell_leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
request = {
'symbol': market['id'],
'leverage_only': True,
}
if not linear:
request['leverage'] = buy_leverage
else:
request['buy_leverage'] = buy_leverage
request['sell_leverage'] = sell_leverage
return getattr(self, method)(self.extend(request, params))
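    # usage sketch (illustrative): linear contracts accept separate buy/sell leverage overrides via params,
    # otherwise the single leverage argument is applied to both sides
    #
    #     exchange.set_leverage(10, 'BTC/USDT')
    #     exchange.set_leverage(None, 'BTC/USDT', {'buy_leverage': 10, 'sell_leverage': 5})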
|
py | b410704fa597511c34a81c00f93307f9c566a39d | import time
from app import app, db, cache
from app.models import Machine
import app.utils as utils
@cache.cached(timeout=3)
# limit_time: only include machines whose last update is within the past limit_time minutes; offline_time: a machine that has not updated for more than offline_time minutes is considered to have lost contact
def get_machine_info(limit_time=60, offline_time=5):
begin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - limit_time * 60))
res = db.session.execute("select * from machine where update_time > '%s' " % begin_time)
data = utils.convert_rowproxy_to_dict(res.fetchall())
for line in data:
        # -1 means the machine has been shut down
if line['status'] == 3:
continue
tmp_update_time = time.mktime(time.strptime(line['update_time'].split('.')[0], "%Y-%m-%d %H:%M:%S"))
if (time.time() - offline_time * 60) > tmp_update_time:
line['status'] = -1
return data
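# Example endpoint (a sketch, not part of the original module; the route path and the use of
# flask.jsonify are assumptions): expose the helper above as JSON for a monitoring page.
from flask import jsonify
@app.route('/machine/info')
def machine_info_api():
    # entries with status == -1 have not reported within the last offline_time minutes
    return jsonify(get_machine_info(limit_time=60, offline_time=5))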
|
py | b410706fa99870ab44d13944ab9596acf96d30dd | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AspectConfidenceScoreLabel(msrest.serialization.Model):
"""Represents the confidence scores across all sentiment classes: positive, neutral, negative.
All required parameters must be populated in order to send to Azure.
:param positive: Required.
:type positive: float
:param negative: Required.
:type negative: float
"""
_validation = {
'positive': {'required': True},
'negative': {'required': True},
}
_attribute_map = {
'positive': {'key': 'positive', 'type': 'float'},
'negative': {'key': 'negative', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(AspectConfidenceScoreLabel, self).__init__(**kwargs)
self.positive = kwargs['positive']
self.negative = kwargs['negative']
class AspectRelation(msrest.serialization.Model):
"""AspectRelation.
All required parameters must be populated in order to send to Azure.
:param relation_type: Required. The type related to the aspect. Possible values include:
"opinion", "aspect".
:type relation_type: str or ~azure.ai.textanalytics.v3_1_preview_1.models.AspectRelationType
:param ref: Required. The JSON pointer indicating the linked object.
:type ref: str
"""
_validation = {
'relation_type': {'required': True},
'ref': {'required': True},
}
_attribute_map = {
'relation_type': {'key': 'relationType', 'type': 'str'},
'ref': {'key': 'ref', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AspectRelation, self).__init__(**kwargs)
self.relation_type = kwargs['relation_type']
self.ref = kwargs['ref']
class DetectedLanguage(msrest.serialization.Model):
"""DetectedLanguage.
All required parameters must be populated in order to send to Azure.
:param name: Required. Long name of a detected language (e.g. English, French).
:type name: str
:param iso6391_name: Required. A two letter representation of the detected language according
to the ISO 639-1 standard (e.g. en, fr).
:type iso6391_name: str
:param confidence_score: Required. A confidence score between 0 and 1. Scores close to 1
indicate 100% certainty that the identified language is true.
:type confidence_score: float
"""
_validation = {
'name': {'required': True},
'iso6391_name': {'required': True},
'confidence_score': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'iso6391_name': {'key': 'iso6391Name', 'type': 'str'},
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(DetectedLanguage, self).__init__(**kwargs)
self.name = kwargs['name']
self.iso6391_name = kwargs['iso6391_name']
self.confidence_score = kwargs['confidence_score']
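# usage sketch (illustrative): the generated models are constructed from keyword arguments matching
# the documented parameters, e.g.
#     DetectedLanguage(name='English', iso6391_name='en', confidence_score=0.99)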
class DocumentEntities(msrest.serialization.Model):
"""DocumentEntities.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized entities in the document.
:type entities: list[~azure.ai.textanalytics.v3_1_preview_1.models.Entity]
:param warnings: Required. Warnings encountered while processing document.
:type warnings: list[~azure.ai.textanalytics.v3_1_preview_1.models.TextAnalyticsWarning]
:param statistics: if showStats=true was specified in the request this field will contain
information about the document payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'entities': {'required': True},
'warnings': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'entities': {'key': 'entities', 'type': '[Entity]'},
'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(
self,
**kwargs
):
super(DocumentEntities, self).__init__(**kwargs)
self.id = kwargs['id']
self.entities = kwargs['entities']
self.warnings = kwargs['warnings']
self.statistics = kwargs.get('statistics', None)
class DocumentError(msrest.serialization.Model):
"""DocumentError.
All required parameters must be populated in order to send to Azure.
:param id: Required. Document Id.
:type id: str
:param error: Required. Document Error.
:type error: ~azure.ai.textanalytics.v3_1_preview_1.models.TextAnalyticsError
"""
_validation = {
'id': {'required': True},
'error': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'error': {'key': 'error', 'type': 'TextAnalyticsError'},
}
def __init__(
self,
**kwargs
):
super(DocumentError, self).__init__(**kwargs)
self.id = kwargs['id']
self.error = kwargs['error']
class DocumentKeyPhrases(msrest.serialization.Model):
"""DocumentKeyPhrases.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param key_phrases: Required. A list of representative words or phrases. The number of key
phrases returned is proportional to the number of words in the input document.
:type key_phrases: list[str]
:param warnings: Required. Warnings encountered while processing document.
:type warnings: list[~azure.ai.textanalytics.v3_1_preview_1.models.TextAnalyticsWarning]
:param statistics: if showStats=true was specified in the request this field will contain
information about the document payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'key_phrases': {'required': True},
'warnings': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'key_phrases': {'key': 'keyPhrases', 'type': '[str]'},
'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(
self,
**kwargs
):
super(DocumentKeyPhrases, self).__init__(**kwargs)
self.id = kwargs['id']
self.key_phrases = kwargs['key_phrases']
self.warnings = kwargs['warnings']
self.statistics = kwargs.get('statistics', None)
class DocumentLanguage(msrest.serialization.Model):
"""DocumentLanguage.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param detected_language: Required. Detected Language.
:type detected_language: ~azure.ai.textanalytics.v3_1_preview_1.models.DetectedLanguage
:param warnings: Required. Warnings encountered while processing document.
:type warnings: list[~azure.ai.textanalytics.v3_1_preview_1.models.TextAnalyticsWarning]
:param statistics: if showStats=true was specified in the request this field will contain
information about the document payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'detected_language': {'required': True},
'warnings': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'},
'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(
self,
**kwargs
):
super(DocumentLanguage, self).__init__(**kwargs)
self.id = kwargs['id']
self.detected_language = kwargs['detected_language']
self.warnings = kwargs['warnings']
self.statistics = kwargs.get('statistics', None)
class DocumentLinkedEntities(msrest.serialization.Model):
"""DocumentLinkedEntities.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized well-known entities in the document.
:type entities: list[~azure.ai.textanalytics.v3_1_preview_1.models.LinkedEntity]
:param warnings: Required. Warnings encountered while processing document.
:type warnings: list[~azure.ai.textanalytics.v3_1_preview_1.models.TextAnalyticsWarning]
:param statistics: if showStats=true was specified in the request this field will contain
information about the document payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'entities': {'required': True},
'warnings': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'entities': {'key': 'entities', 'type': '[LinkedEntity]'},
'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(
self,
**kwargs
):
super(DocumentLinkedEntities, self).__init__(**kwargs)
self.id = kwargs['id']
self.entities = kwargs['entities']
self.warnings = kwargs['warnings']
self.statistics = kwargs.get('statistics', None)
class DocumentSentiment(msrest.serialization.Model):
"""DocumentSentiment.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param sentiment: Required. Predicted sentiment for document (Negative, Neutral, Positive, or
Mixed). Possible values include: "positive", "neutral", "negative", "mixed".
:type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_1.models.DocumentSentimentValue
:param statistics: if showStats=true was specified in the request this field will contain
information about the document payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.DocumentStatistics
:param confidence_scores: Required. Document level sentiment confidence scores between 0 and 1
for each sentiment class.
:type confidence_scores:
~azure.ai.textanalytics.v3_1_preview_1.models.SentimentConfidenceScorePerLabel
:param sentences: Required. Sentence level sentiment analysis.
:type sentences: list[~azure.ai.textanalytics.v3_1_preview_1.models.SentenceSentiment]
:param warnings: Required. Warnings encountered while processing document.
:type warnings: list[~azure.ai.textanalytics.v3_1_preview_1.models.TextAnalyticsWarning]
"""
_validation = {
'id': {'required': True},
'sentiment': {'required': True},
'confidence_scores': {'required': True},
'sentences': {'required': True},
'warnings': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'sentiment': {'key': 'sentiment', 'type': 'str'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
}
def __init__(
self,
**kwargs
):
super(DocumentSentiment, self).__init__(**kwargs)
self.id = kwargs['id']
self.sentiment = kwargs['sentiment']
self.statistics = kwargs.get('statistics', None)
self.confidence_scores = kwargs['confidence_scores']
self.sentences = kwargs['sentences']
self.warnings = kwargs['warnings']
class DocumentStatistics(msrest.serialization.Model):
"""if showStats=true was specified in the request this field will contain information about the document payload.
All required parameters must be populated in order to send to Azure.
:param characters_count: Required. Number of text elements recognized in the document.
:type characters_count: int
:param transactions_count: Required. Number of transactions for the document.
:type transactions_count: int
"""
_validation = {
'characters_count': {'required': True},
'transactions_count': {'required': True},
}
_attribute_map = {
'characters_count': {'key': 'charactersCount', 'type': 'int'},
'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(DocumentStatistics, self).__init__(**kwargs)
self.characters_count = kwargs['characters_count']
self.transactions_count = kwargs['transactions_count']
class EntitiesResult(msrest.serialization.Model):
"""EntitiesResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document.
:type documents: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentEntities]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentError]
:param statistics: if showStats=true was specified in the request this field will contain
information about the request payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.RequestStatistics
:param model_version: Required. This field indicates which model is used for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentEntities]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EntitiesResult, self).__init__(**kwargs)
self.documents = kwargs['documents']
self.errors = kwargs['errors']
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs['model_version']
class Entity(msrest.serialization.Model):
"""Entity.
All required parameters must be populated in order to send to Azure.
:param text: Required. Entity text as appears in the request.
:type text: str
:param category: Required. Entity type, such as Person/Location/Org/SSN etc.
:type category: str
:param subcategory: Entity sub type, such as Age/Year/TimeRange etc.
:type subcategory: str
:param offset: Required. Start position for the entity text.
:type offset: int
:param length: Required. Length for the entity text.
:type length: int
:param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity.
:type confidence_score: float
"""
_validation = {
'text': {'required': True},
'category': {'required': True},
'offset': {'required': True},
'length': {'required': True},
'confidence_score': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'subcategory': {'key': 'subcategory', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(Entity, self).__init__(**kwargs)
self.text = kwargs['text']
self.category = kwargs['category']
self.subcategory = kwargs.get('subcategory', None)
self.offset = kwargs['offset']
self.length = kwargs['length']
self.confidence_score = kwargs['confidence_score']
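# usage sketch (illustrative, required fields only; 'subcategory' is optional):
#     Entity(text='Seattle', category='Location', offset=10, length=7, confidence_score=0.95)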
class EntityLinkingResult(msrest.serialization.Model):
"""EntityLinkingResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document.
:type documents: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentLinkedEntities]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentError]
:param statistics: if showStats=true was specified in the request this field will contain
information about the request payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.RequestStatistics
:param model_version: Required. This field indicates which model is used for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EntityLinkingResult, self).__init__(**kwargs)
self.documents = kwargs['documents']
self.errors = kwargs['errors']
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs['model_version']
class InnerError(msrest.serialization.Model):
"""InnerError.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code. Possible values include: "invalidParameterValue",
"invalidRequestBodyFormat", "emptyRequest", "missingInputRecords", "invalidDocument",
"modelVersionIncorrect", "invalidDocumentBatch", "unsupportedLanguageCode",
"invalidCountryHint".
:type code: str or ~azure.ai.textanalytics.v3_1_preview_1.models.InnerErrorCodeValue
:param message: Required. Error message.
:type message: str
:param details: Error details.
:type details: dict[str, str]
:param target: Error target.
:type target: str
:param innererror: Inner error contains more specific information.
:type innererror: ~azure.ai.textanalytics.v3_1_preview_1.models.InnerError
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '{str}'},
'target': {'key': 'target', 'type': 'str'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
}
def __init__(
self,
**kwargs
):
super(InnerError, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.details = kwargs.get('details', None)
self.target = kwargs.get('target', None)
self.innererror = kwargs.get('innererror', None)
class KeyPhraseResult(msrest.serialization.Model):
"""KeyPhraseResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document.
:type documents: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentKeyPhrases]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentError]
:param statistics: if showStats=true was specified in the request this field will contain
information about the request payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.RequestStatistics
:param model_version: Required. This field indicates which model is used for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyPhraseResult, self).__init__(**kwargs)
self.documents = kwargs['documents']
self.errors = kwargs['errors']
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs['model_version']
class LanguageBatchInput(msrest.serialization.Model):
"""LanguageBatchInput.
All required parameters must be populated in order to send to Azure.
:param documents: Required.
:type documents: list[~azure.ai.textanalytics.v3_1_preview_1.models.LanguageInput]
"""
_validation = {
'documents': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[LanguageInput]'},
}
def __init__(
self,
**kwargs
):
super(LanguageBatchInput, self).__init__(**kwargs)
self.documents = kwargs['documents']
class LanguageInput(msrest.serialization.Model):
"""LanguageInput.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param text: Required.
:type text: str
:param country_hint:
:type country_hint: str
"""
_validation = {
'id': {'required': True},
'text': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
'country_hint': {'key': 'countryHint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LanguageInput, self).__init__(**kwargs)
self.id = kwargs['id']
self.text = kwargs['text']
self.country_hint = kwargs.get('country_hint', None)
class LanguageResult(msrest.serialization.Model):
"""LanguageResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document.
:type documents: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentLanguage]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentError]
:param statistics: if showStats=true was specified in the request this field will contain
information about the request payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.RequestStatistics
:param model_version: Required. This field indicates which model is used for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentLanguage]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LanguageResult, self).__init__(**kwargs)
self.documents = kwargs['documents']
self.errors = kwargs['errors']
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs['model_version']
class LinkedEntity(msrest.serialization.Model):
"""LinkedEntity.
All required parameters must be populated in order to send to Azure.
:param name: Required. Entity Linking formal name.
:type name: str
:param matches: Required. List of instances this entity appears in the text.
:type matches: list[~azure.ai.textanalytics.v3_1_preview_1.models.Match]
:param language: Required. Language used in the data source.
:type language: str
:param id: Unique identifier of the recognized entity from the data source.
:type id: str
:param url: Required. URL for the entity's page from the data source.
:type url: str
:param data_source: Required. Data source used to extract entity linking, such as Wiki/Bing
etc.
:type data_source: str
"""
_validation = {
'name': {'required': True},
'matches': {'required': True},
'language': {'required': True},
'url': {'required': True},
'data_source': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'matches': {'key': 'matches', 'type': '[Match]'},
'language': {'key': 'language', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'data_source': {'key': 'dataSource', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LinkedEntity, self).__init__(**kwargs)
self.name = kwargs['name']
self.matches = kwargs['matches']
self.language = kwargs['language']
self.id = kwargs.get('id', None)
self.url = kwargs['url']
self.data_source = kwargs['data_source']
class Match(msrest.serialization.Model):
"""Match.
All required parameters must be populated in order to send to Azure.
:param confidence_score: Required. If a well-known item is recognized, a decimal number
denoting the confidence level between 0 and 1 will be returned.
:type confidence_score: float
:param text: Required. Entity text as appears in the request.
:type text: str
:param offset: Required. Start position for the entity match text.
:type offset: int
:param length: Required. Length for the entity match text.
:type length: int
"""
_validation = {
'confidence_score': {'required': True},
'text': {'required': True},
'offset': {'required': True},
'length': {'required': True},
}
_attribute_map = {
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
'text': {'key': 'text', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(Match, self).__init__(**kwargs)
self.confidence_score = kwargs['confidence_score']
self.text = kwargs['text']
self.offset = kwargs['offset']
self.length = kwargs['length']
class MultiLanguageBatchInput(msrest.serialization.Model):
"""Contains a set of input documents to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
:param documents: Required. The set of documents to process as part of this batch.
:type documents: list[~azure.ai.textanalytics.v3_1_preview_1.models.MultiLanguageInput]
"""
_validation = {
'documents': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
}
def __init__(
self,
**kwargs
):
super(MultiLanguageBatchInput, self).__init__(**kwargs)
self.documents = kwargs['documents']
class MultiLanguageInput(msrest.serialization.Model):
"""Contains an input document to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
:param id: Required. A unique, non-empty document identifier.
:type id: str
:param text: Required. The input text to process.
:type text: str
:param language: (Optional) This is the 2 letter ISO 639-1 representation of a language. For
example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
default.
:type language: str
"""
_validation = {
'id': {'required': True},
'text': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
'language': {'key': 'language', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MultiLanguageInput, self).__init__(**kwargs)
self.id = kwargs['id']
self.text = kwargs['text']
self.language = kwargs.get('language', None)
class RequestStatistics(msrest.serialization.Model):
"""if showStats=true was specified in the request this field will contain information about the request payload.
All required parameters must be populated in order to send to Azure.
:param documents_count: Required. Number of documents submitted in the request.
:type documents_count: int
    :param valid_documents_count: Required. Number of valid documents. This excludes documents that are empty, over the size limit, or in unsupported languages.
:type valid_documents_count: int
    :param erroneous_documents_count: Required. Number of invalid documents. This includes documents that are empty, over the size limit, or in unsupported languages.
:type erroneous_documents_count: int
:param transactions_count: Required. Number of transactions for the request.
:type transactions_count: long
"""
_validation = {
'documents_count': {'required': True},
'valid_documents_count': {'required': True},
'erroneous_documents_count': {'required': True},
'transactions_count': {'required': True},
}
_attribute_map = {
'documents_count': {'key': 'documentsCount', 'type': 'int'},
'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(RequestStatistics, self).__init__(**kwargs)
self.documents_count = kwargs['documents_count']
self.valid_documents_count = kwargs['valid_documents_count']
self.erroneous_documents_count = kwargs['erroneous_documents_count']
self.transactions_count = kwargs['transactions_count']
class SentenceAspect(msrest.serialization.Model):
"""SentenceAspect.
All required parameters must be populated in order to send to Azure.
:param sentiment: Required. Aspect level sentiment for the aspect in the sentence. Possible
values include: "positive", "mixed", "negative".
:type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_1.models.SentenceAspectSentiment
:param confidence_scores: Required. Aspect level sentiment confidence scores for the aspect in
the sentence.
:type confidence_scores:
~azure.ai.textanalytics.v3_1_preview_1.models.AspectConfidenceScoreLabel
:param offset: Required. The aspect offset from the start of the sentence.
:type offset: int
:param length: Required. The length of the aspect.
:type length: int
:param text: Required. The aspect text detected.
:type text: str
:param relations: Required. The array of either opinion or aspect object which is related to
the aspect.
:type relations: list[~azure.ai.textanalytics.v3_1_preview_1.models.AspectRelation]
"""
_validation = {
'sentiment': {'required': True},
'confidence_scores': {'required': True},
'offset': {'required': True},
'length': {'required': True},
'text': {'required': True},
'relations': {'required': True},
}
_attribute_map = {
'sentiment': {'key': 'sentiment', 'type': 'str'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
'text': {'key': 'text', 'type': 'str'},
'relations': {'key': 'relations', 'type': '[AspectRelation]'},
}
def __init__(
self,
**kwargs
):
super(SentenceAspect, self).__init__(**kwargs)
self.sentiment = kwargs['sentiment']
self.confidence_scores = kwargs['confidence_scores']
self.offset = kwargs['offset']
self.length = kwargs['length']
self.text = kwargs['text']
self.relations = kwargs['relations']
class SentenceOpinion(msrest.serialization.Model):
"""SentenceOpinion.
All required parameters must be populated in order to send to Azure.
:param sentiment: Required. Opinion level sentiment for the aspect in the sentence. Possible
values include: "positive", "mixed", "negative".
:type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_1.models.SentenceOpinionSentiment
:param confidence_scores: Required. Opinion level sentiment confidence scores for the aspect in
the sentence.
:type confidence_scores:
~azure.ai.textanalytics.v3_1_preview_1.models.AspectConfidenceScoreLabel
:param offset: Required. The opinion offset from the start of the sentence.
:type offset: int
:param length: Required. The length of the opinion.
:type length: int
    :param text: Required. The opinion text detected.
:type text: str
:param is_negated: Required. The indicator representing if the opinion is negated.
:type is_negated: bool
"""
_validation = {
'sentiment': {'required': True},
'confidence_scores': {'required': True},
'offset': {'required': True},
'length': {'required': True},
'text': {'required': True},
'is_negated': {'required': True},
}
_attribute_map = {
'sentiment': {'key': 'sentiment', 'type': 'str'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
'text': {'key': 'text', 'type': 'str'},
'is_negated': {'key': 'isNegated', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(SentenceOpinion, self).__init__(**kwargs)
self.sentiment = kwargs['sentiment']
self.confidence_scores = kwargs['confidence_scores']
self.offset = kwargs['offset']
self.length = kwargs['length']
self.text = kwargs['text']
self.is_negated = kwargs['is_negated']
class SentenceSentiment(msrest.serialization.Model):
"""SentenceSentiment.
All required parameters must be populated in order to send to Azure.
:param text: Required. The sentence text.
:type text: str
:param sentiment: Required. The predicted Sentiment for the sentence. Possible values include:
"positive", "neutral", "negative".
:type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_1.models.SentenceSentimentValue
:param confidence_scores: Required. The sentiment confidence score between 0 and 1 for the
sentence for all classes.
:type confidence_scores:
~azure.ai.textanalytics.v3_1_preview_1.models.SentimentConfidenceScorePerLabel
:param offset: Required. The sentence offset from the start of the document.
:type offset: int
:param length: Required. The length of the sentence.
:type length: int
:param aspects: The array of aspect object for the sentence.
:type aspects: list[~azure.ai.textanalytics.v3_1_preview_1.models.SentenceAspect]
:param opinions: The array of opinion object for the sentence.
:type opinions: list[~azure.ai.textanalytics.v3_1_preview_1.models.SentenceOpinion]
"""
_validation = {
'text': {'required': True},
'sentiment': {'required': True},
'confidence_scores': {'required': True},
'offset': {'required': True},
'length': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'sentiment': {'key': 'sentiment', 'type': 'str'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
'aspects': {'key': 'aspects', 'type': '[SentenceAspect]'},
'opinions': {'key': 'opinions', 'type': '[SentenceOpinion]'},
}
def __init__(
self,
**kwargs
):
super(SentenceSentiment, self).__init__(**kwargs)
self.text = kwargs['text']
self.sentiment = kwargs['sentiment']
self.confidence_scores = kwargs['confidence_scores']
self.offset = kwargs['offset']
self.length = kwargs['length']
self.aspects = kwargs.get('aspects', None)
self.opinions = kwargs.get('opinions', None)
class SentimentConfidenceScorePerLabel(msrest.serialization.Model):
"""Represents the confidence scores between 0 and 1 across all sentiment classes: positive, neutral, negative.
All required parameters must be populated in order to send to Azure.
:param positive: Required.
:type positive: float
:param neutral: Required.
:type neutral: float
:param negative: Required.
:type negative: float
"""
_validation = {
'positive': {'required': True},
'neutral': {'required': True},
'negative': {'required': True},
}
_attribute_map = {
'positive': {'key': 'positive', 'type': 'float'},
'neutral': {'key': 'neutral', 'type': 'float'},
'negative': {'key': 'negative', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
self.positive = kwargs['positive']
self.neutral = kwargs['neutral']
self.negative = kwargs['negative']
class SentimentResponse(msrest.serialization.Model):
"""SentimentResponse.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Sentiment analysis per document.
:type documents: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentSentiment]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.v3_1_preview_1.models.DocumentError]
:param statistics: if showStats=true was specified in the request this field will contain
information about the request payload.
:type statistics: ~azure.ai.textanalytics.v3_1_preview_1.models.RequestStatistics
:param model_version: Required. This field indicates which model is used for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SentimentResponse, self).__init__(**kwargs)
self.documents = kwargs['documents']
self.errors = kwargs['errors']
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs['model_version']
class TextAnalyticsError(msrest.serialization.Model):
"""TextAnalyticsError.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code. Possible values include: "invalidRequest",
"invalidArgument", "internalServerError", "serviceUnavailable".
:type code: str or ~azure.ai.textanalytics.v3_1_preview_1.models.ErrorCodeValue
:param message: Required. Error message.
:type message: str
:param target: Error target.
:type target: str
:param innererror: Inner error contains more specific information.
:type innererror: ~azure.ai.textanalytics.v3_1_preview_1.models.InnerError
:param details: Details about specific errors that led to this reported error.
:type details: list[~azure.ai.textanalytics.v3_1_preview_1.models.TextAnalyticsError]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
}
def __init__(
self,
**kwargs
):
super(TextAnalyticsError, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = kwargs.get('target', None)
self.innererror = kwargs.get('innererror', None)
self.details = kwargs.get('details', None)
class TextAnalyticsWarning(msrest.serialization.Model):
"""TextAnalyticsWarning.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code. Possible values include: "LongWordsInDocument",
"DocumentTruncated".
:type code: str or ~azure.ai.textanalytics.v3_1_preview_1.models.WarningCodeValue
:param message: Required. Warning message.
:type message: str
:param target_ref: A JSON pointer reference indicating the target object.
:type target_ref: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target_ref': {'key': 'targetRef', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TextAnalyticsWarning, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.target_ref = kwargs.get('target_ref', None)
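# Illustrative usage sketch (not part of the generated client): these models are
# normally produced by deserializing service responses, but they also accept the
# documented keyword arguments directly, e.g.
#   scores = SentimentConfidenceScorePerLabel(positive=0.1, neutral=0.2, negative=0.7)
#   warning = TextAnalyticsWarning(code="DocumentTruncated", message="The document was truncated.")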
|
py | b410716cf0f106fbb3d416d7b12564ca3345d28e | r"""
Finite posets
Here is some terminology used in this file:
- An *order filter* (or *upper set*) of a poset `P` is a subset `S` of `P`
such that if `x \leq y` and `x\in S` then `y\in S`.
- An *order ideal* (or *lower set*) of a poset `P` is a subset `S` of `P`
such that if `x \leq y` and `y\in S` then `x\in S`.
"""
#*****************************************************************************
# Copyright (C) 2011 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.abstract_method import abstract_method
from sage.categories.category_with_axiom import CategoryWithAxiom
class FinitePosets(CategoryWithAxiom):
r"""
The category of finite posets i.e. finite sets with a partial
order structure.
EXAMPLES::
sage: FinitePosets()
Category of finite posets
sage: FinitePosets().super_categories()
[Category of posets, Category of finite sets]
sage: FinitePosets().example()
NotImplemented
.. seealso:: :class:`~sage.categories.posets.Posets`, :func:`Poset`
TESTS::
sage: C = FinitePosets()
sage: C is Posets().Finite()
True
sage: TestSuite(C).run()
"""
class ParentMethods:
##########################################################################
# Properties of this poset
def is_lattice(self):
r"""
Returns whether this poset is both a meet and a join semilattice.
EXAMPLES::
sage: P = Poset([[1,3,2],[4],[4,5,6],[6],[7],[7],[7],[]])
sage: P.is_lattice()
True
sage: P = Poset([[1,2],[3],[3],[]])
sage: P.is_lattice()
True
sage: P = Poset({0:[2,3],1:[2,3]})
sage: P.is_lattice()
False
"""
return (self.cardinality() == 0 or
(self.has_bottom() and self.is_join_semilattice()))
def is_selfdual(self):
r"""
Returns whether this poset is *self-dual*, that is
isomorphic to its dual poset.
EXAMPLES::
sage: P = Poset(([1,2,3],[[1,3],[2,3]]),cover_relations=True)
sage: P.is_selfdual()
False
sage: P = Poset(([1,2,3,4],[[1,3],[1,4],[2,3],[2,4]]),cover_relations=True)
sage: P.is_selfdual()
True
sage: P = Poset( {} )
sage: P.is_selfdual()
True
"""
# Two quick checks before full isomorphic test.
if sorted(self._hasse_diagram.in_degree()) != sorted(self._hasse_diagram.out_degree()):
return False
levels_orig = [len(x) for x in self._hasse_diagram.level_sets()]
dual_poset_hasse = self._hasse_diagram.reverse()
levels_dual = [len(x) for x in dual_poset_hasse.level_sets()]
if levels_orig != levels_dual:
return False
return self._hasse_diagram.is_isomorphic(dual_poset_hasse)
##########################################################################
# Properties of morphisms
def is_poset_isomorphism(self, f, codomain):
r"""
Return whether `f` is an isomorphism of posets from
``self`` to ``codomain``.
INPUT:
- ``f`` -- a function from ``self`` to ``codomain``
- ``codomain`` -- a poset
EXAMPLES:
We build the poset `D` of divisors of 30, and check that
it is isomorphic to the boolean lattice `B` of the subsets
of `\{2,3,5\}` ordered by inclusion, via the reverse
function `f: B \to D, b \mapsto \prod_{x\in b} x`::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: B = Poset(([frozenset(s) for s in Subsets([2,3,5])], attrcall("issubset")))
sage: def f(b): return D(prod(b))
sage: B.is_poset_isomorphism(f, D)
True
On the other hand, `f` is not an isomorphism to the chain
of divisors of 30, ordered by usual comparison::
sage: P = Poset((divisors(30), operator.le))
sage: def f(b): return P(prod(b))
sage: B.is_poset_isomorphism(f, P)
False
A non surjective case::
sage: B = Poset(([frozenset(s) for s in Subsets([2,3])], attrcall("issubset")))
sage: def f(b): return D(prod(b))
sage: B.is_poset_isomorphism(f, D)
False
A non injective case::
sage: B = Poset(([frozenset(s) for s in Subsets([2,3,5,6])], attrcall("issubset")))
sage: def f(b): return D(gcd(prod(b), 30))
sage: B.is_poset_isomorphism(f, D)
False
.. note:: since ``D`` and ``B`` are not facade posets, ``f`` is
responsible for the conversions between integers and subsets to
elements of ``D`` and ``B`` and back.
.. seealso:: :meth:`FiniteLatticePosets.ParentMethods.is_lattice_morphism`
"""
image = set(f(x) for x in self)
if len(image) != self.cardinality():
# Not injective
return False
if len(image) != codomain.cardinality():
# Not surjective
return False
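# At this point f is a bijection; it is an isomorphism exactly when it maps
# the cover relations of self onto the cover relations of the codomain.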
for x in self:
if set(f(y) for y in self.upper_covers(x)) != set(codomain.upper_covers(f(x))):
return False
return True
def is_poset_morphism(self, f, codomain):
r"""
Return whether `f` is a morphism of posets from ``self``
to ``codomain``, that is
.. MATH::
x\leq y \Longrightarrow f(x) \leq f(y)
for all `x` and `y` in ``self``.
INPUT:
- ``f`` -- a function from ``self`` to ``codomain``
- ``codomain`` -- a poset
EXAMPLES:
We build the boolean lattice of the subsets of
`\{2,3,5,6\}` and the lattice of divisors of `30`, and
check that the map `b \mapsto \gcd(\prod_{x\in b} x, 30)`
is a morphism of posets::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: B = Poset(([frozenset(s) for s in Subsets([2,3,5,6])], attrcall("issubset")))
sage: def f(b): return D(gcd(prod(b), 30))
sage: B.is_poset_morphism(f, D)
True
.. note:: since ``D`` and ``B`` are not facade posets, ``f`` is responsible
for the conversions between integers and subsets to elements of
``D`` and ``B`` and back.
`f` is also a morphism of posets to the chain of divisors
of 30, ordered by usual comparison::
sage: P = Poset((divisors(30), operator.le))
sage: def f(b): return P(gcd(prod(b), 30))
sage: B.is_poset_morphism(f, P)
True
FIXME: should this be ``is_order_preserving_morphism``?
.. seealso:: :meth:`is_poset_isomorphism`
TESTS:
Base cases::
sage: P = Posets.ChainPoset(2)
sage: Q = Posets.AntichainPoset(2)
sage: f = lambda x: 1-x
sage: P.is_poset_morphism(f, P)
False
sage: P.is_poset_morphism(f, Q)
False
sage: Q.is_poset_morphism(f, Q)
True
sage: Q.is_poset_morphism(f, P)
True
sage: P = Poset(); P
Finite poset containing 0 elements
sage: P.is_poset_morphism(f, P)
True
"""
for x in self:
for y in self.upper_covers(x):
if not codomain.is_lequal(f(x),f(y)):
return False
return True
##########################################################################
# About order ideals, order filters and the like
def order_ideal_generators(self, ideal, direction='down'):
r"""
Return the antichain of (minimal) generators of the order
ideal (resp. order filter) ``ideal``.
INPUT:
- ``ideal`` -- an order ideal `I` (resp. order filter)
of ``self``, as a list (or iterable); this should be
an order ideal if ``direction`` is set to ``'down'``,
and an order filter if ``direction`` is set to
``'up'``.
- ``direction`` -- ``'up'`` or ``'down'`` (default:
``'down'``).
The antichain of (minimal) generators of an order ideal
`I` in a poset `P` is the set of all maximal elements of
`I`. In the case of an order filter, the definition is
similar, but with "minimal" used instead of "maximal".
EXAMPLES:
We build the boolean lattice of all subsets of `\{1,2,3\}`
ordered by inclusion, and compute an order ideal there::
sage: P = Poset((Subsets([1,2,3]), attrcall("issubset")))
sage: I = P.order_ideal([Set([1,2]), Set([2,3]), Set([1])]); I
[{}, {3}, {2}, {2, 3}, {1}, {1, 2}]
Then, we retrieve the generators of this ideal::
sage: P.order_ideal_generators(I)
{{1, 2}, {2, 3}}
If ``direction`` is 'up', then this instead computes
the minimal generators for an order filter::
sage: I = P.order_filter([Set([1,2]), Set([2,3]), Set([1])]); I
[{2, 3}, {1}, {1, 2}, {1, 3}, {1, 2, 3}]
sage: P.order_ideal_generators(I, direction='up')
{{2, 3}, {1}}
Complexity: `O(n+m)` where `n` is the cardinality of `I`,
and `m` the number of upper covers of elements of `I`.
"""
if direction == 'down':
covers = self.upper_covers
else:
covers = self.lower_covers
ideal_as_set = set(ideal)
from sage.sets.set import Set
return Set(x for x in ideal if all(y not in ideal_as_set
for y in covers(x)))
def order_filter_generators(self, filter):
r"""
Generators for an order filter
INPUT:
- ``filter`` -- an order filter of ``self``, as a list (or iterable)
EXAMPLES::
sage: P = Poset((Subsets([1,2,3]), attrcall("issubset")))
sage: I = P.order_filter([Set([1,2]), Set([2,3]), Set([1])]); I
[{2, 3}, {1}, {1, 2}, {1, 3}, {1, 2, 3}]
sage: P.order_filter_generators(I)
{{2, 3}, {1}}
.. seealso:: :meth:`order_ideal_generators`
"""
return self.order_ideal_generators(filter, direction='up')
def order_ideal_complement_generators(self, antichain, direction='up'):
r"""
Return the Panyushev complement of the antichain
``antichain``.
Given an antichain `A` of a poset `P`, the Panyushev
complement of `A` is defined to be the antichain consisting
of the minimal elements of the order filter `B`, where `B`
is the (set-theoretic) complement of the order ideal of
`P` generated by `A`.
Setting the optional keyword variable ``direction`` to
``'down'`` leads to the inverse Panyushev complement being
computed instead of the Panyushev complement. The inverse
Panyushev complement of an antichain `A` is the antichain
whose Panyushev complement is `A`. It can be found as the
antichain consisting of the maximal elements of the order
ideal `C`, where `C` is the (set-theoretic) complement of
the order filter of `P` generated by `A`.
:meth:`panyushev_complement` is an alias for this method.
Panyushev complementation is related (actually, isomorphic)
to rowmotion (:meth:`rowmotion`).
INPUT:
- ``antichain`` -- an antichain of ``self``, as a list (or
iterable), or, more generally, generators of an order ideal
(resp. order filter)
- ``direction`` -- 'up' or 'down' (default: 'up')
OUTPUT:
- the generating antichain of the complement order filter
(resp. order ideal) of the order ideal (resp. order filter)
generated by the antichain ``antichain``
EXAMPLES::
sage: P = Poset( ( [1,2,3], [ [1,3], [2,3] ] ) )
sage: P.order_ideal_complement_generators([1])
{2}
sage: P.order_ideal_complement_generators([3])
set()
sage: P.order_ideal_complement_generators([1,2])
{3}
sage: P.order_ideal_complement_generators([1,2,3])
set()
sage: P.order_ideal_complement_generators([1], direction="down")
{2}
sage: P.order_ideal_complement_generators([3], direction="down")
{1, 2}
sage: P.order_ideal_complement_generators([1,2], direction="down")
set()
sage: P.order_ideal_complement_generators([1,2,3], direction="down")
set()
.. WARNING::
This is a brute force implementation, building the
order ideal generated by the antichain, and searching
for order filter generators of its complement
"""
if direction == 'up':
I = self.order_ideal(antichain)
else:
I = self.order_filter(antichain)
I_comp = set(self).difference(I)
return set(self.order_ideal_generators(I_comp, direction = direction))
panyushev_complement = order_ideal_complement_generators
def rowmotion(self, order_ideal):
r"""
The image of the order ideal ``order_ideal`` under rowmotion
in ``self``.
Rowmotion on a finite poset `P` is an automorphism of the set
`J(P)` of all order ideals of `P`. One way to define it is as
follows: Given an order ideal `I \in J(P)`, we let `F` be the
set-theoretic complement of `I` in `P`. Furthermore we let
`A` be the antichain consisting of all minimal elements of
`F`. Then, the rowmotion of `I` is defined to be the order
ideal of `P` generated by the antichain `A` (that is, the
order ideal consisting of each element of `P` which has some
element of `A` above it).
Rowmotion is related (actually, isomorphic) to Panyushev
complementation (:meth:`panyushev_complement`).
INPUT:
- ``order_ideal`` -- an order ideal of ``self``, as a set
OUTPUT:
- the image of ``order_ideal`` under rowmotion, as a set again
EXAMPLES::
sage: P = Poset( {1: [2, 3], 2: [], 3: [], 4: [8], 5: [], 6: [5], 7: [1, 4], 8: []} )
sage: I = Set({2, 6, 1, 7})
sage: P.rowmotion(I)
{1, 3, 4, 5, 6, 7}
sage: P = Poset( {} )
sage: I = Set({})
sage: P.rowmotion(I)
Set of elements of {}
"""
result = order_ideal
for i in reversed(self.linear_extension()):
result = self.order_ideal_toggle(result, i)
return result
def birational_free_labelling(self, linear_extension=None,
prefix='x', base_field=None,
reduced=False, addvars=None):
r"""
Return the birational free labelling of ``self``.
Let us hold back defining this, and introduce birational
toggles and birational rowmotion first. These notions have
been introduced in [EP13]_ as generalizations of the notions
of toggles (:meth:`order_ideal_toggle`) and :meth:`rowmotion
<rowmotion>` on order ideals of a finite poset. They
have been studied further in [GR13]_.
Let `\mathbf{K}` be a field, and `P` be a finite poset. Let
`\widehat{P}` denote the poset obtained from `P` by adding a
new element `1` which is greater than all existing elements
of `P`, and a new element `0` which is smaller than all
existing elements of `P` and `1`. Now, a `\mathbf{K}`-*labelling
of* `P` will mean any function from `\widehat{P}` to `\mathbf{K}`.
The image of an element `v` of `\widehat{P}` under this labelling
will be called the *label* of this labelling at `v`. The set
of all `\mathbf{K}`-labellings of `P` is clearly
`\mathbf{K}^{\widehat{P}}`.
For any `v \in P`, we now define a rational map
`T_v : \mathbf{K}^{\widehat{P}} \dashrightarrow
\mathbf{K}^{\widehat{P}}` as follows: For every `f \in
\mathbf{K}^{\widehat{P}}`, the image `T_v f` should send every
element `u \in \widehat{P}` distinct from `v` to `f(u)` (so the
labels at all `u \neq v` don't change), while `v` is sent to
.. MATH::
\frac{1}{f(v)} \cdot
\frac{\sum_{u \lessdot v} f(u)}
{\sum_{u \gtrdot v} \frac{1}{f(u)}}
(both sums are over all `u \in \widehat{P}` satisfying the
respectively given conditions). Here, `\lessdot` and `\gtrdot`
mean (respectively) "covered by" and "covers", interpreted with
respect to the poset `\widehat{P}`. This rational map `T_v`
is an involution and is called the *(birational)* `v`-*toggle*; see
:meth:`birational_toggle` for its implementation.
Now, *birational rowmotion* is defined as the composition
`T_{v_1} \circ T_{v_2} \circ \cdots \circ T_{v_n}`, where
`(v_1, v_2, \ldots, v_n)` is a linear extension of `P`
(written as a linear ordering of the elements of `P`). This
is a rational map
`\mathbf{K}^{\widehat{P}} \dashrightarrow \mathbf{K}^{\widehat{P}}`
which does not depend on the choice of the linear extension;
it is denoted by `R`. See :meth:`birational_rowmotion` for
its implementation.
The definitions of birational toggles and birational
rowmotion extend to the case of `\mathbf{K}` being any semifield
rather than necessarily a field (although it becomes less
clear what constitutes a rational map in this generality).
The most useful case is that of the :class:`tropical semiring
<sage.rings.semirings.tropical_semiring.TropicalSemiring>`,
in which case birational rowmotion relates to classical
constructions such as promotion of rectangular semistandard
Young tableaux (page 5 of [EP13b]_ and future work, via the
related notion of birational *promotion*) and rowmotion on
order ideals of the poset ([EP13]_).
The *birational free labelling* is a special labelling
defined for every finite poset `P` and every linear extension
`(v_1, v_2, \ldots, v_n)` of `P`. It is given by sending
every element `v_i` in `P` to `x_i`, sending the element `0`
of `\widehat{P}` to `a`, and sending the element `1` of
`\widehat{P}` to `b`, where the ground field `\mathbf{K}` is the
field of rational functions in `n+2` indeterminates
`a, x_1, x_2, \ldots, x_n, b` over `\mathbb Q`.
In Sage, a labelling `f` of a poset `P` is encoded as a
`4`-tuple `(\mathbf{K}, d, u, v)`, where `\mathbf{K}` is the
ground field of the labelling (i. e., its target), `d` is the
dictionary containing the values of `f` at the elements of
`P` (the keys being the respective elements of `P`), `u`
is the label of `f` at `0`, and `v` is the label of `f` at
`1`.
.. WARNING::
The dictionary `d` is labelled by the elements of `P`.
If `P` is a poset with ``facade`` option set to
``False``, these might not be what they seem to be!
(For instance, if
``P == Poset({1: [2, 3]}, facade=False)``, then the
value of `d` at `1` has to be accessed by ``d[P(1)]``, not
by ``d[1]``.)
.. WARNING::
Dictionaries are mutable. They do compare correctly,
but are not hashable and need to be cloned to avoid
spooky action at a distance. Be careful!
INPUT:
- ``linear_extension`` -- (default: the default linear
extension of ``self``) a linear extension of ``self``
(as a linear extension or as a list), or more generally
a list of all elements of ``self`` each
occurring exactly once
- ``prefix`` -- (default: ``'x'``) the prefix to name
the indeterminates corresponding to the elements of
``self`` in the labelling (so, setting it to
``'frog'`` will result in these indeterminates being
called ``frog1, frog2, ..., frogn`` rather than
``x1, x2, ..., xn``).
- ``base_field`` -- (default: ``QQ``) the base field to
be used instead of `\QQ` to define the rational
function field over; this is not going to be the base
field of the labelling, because the latter will have
indeterminates adjoined!
- ``reduced`` -- (default: ``False``) if set to
``True``, the result will be the *reduced* birational
free labelling, which differs from the regular one by
having `0` and `1` both sent to `1` instead of `a` and
`b` (the indeterminates `a` and `b` then also won't
appear in the ground field)
- ``addvars`` -- (default: ``''``) a string containing
names of extra variables to be adjoined to the ground
field (these don't have an effect on the labels)
OUTPUT:
The birational free labelling of the poset ``self`` and the
linear extension ``linear_extension``. Or, if ``reduced``
is set to ``True``, the reduced birational free labelling.
REFERENCES:
.. [EP13] David Einstein, James Propp.
*Combinatorial, piecewise-linear, and birational homomesy
for products of two chains*.
:arxiv:`1310.5294v1`.
.. [EP13b] David Einstein, James Propp.
*Piecewise-linear and birational toggling*.
Extended abstract for FPSAC 2014.
http://faculty.uml.edu/jpropp/fpsac14.pdf
.. [GR13] Darij Grinberg, Tom Roby.
*Iterative properties of birational rowmotion I*.
http://web.mit.edu/~darij/www/algebra/skeletal.pdf
EXAMPLES:
We construct the birational free labelling on a simple
poset::
sage: P = Poset({1: [2, 3]})
sage: l = P.birational_free_labelling(); l
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b over Rational Field,
{...},
a,
b)
sage: sorted(l[1].items())
[(1, x1), (2, x2), (3, x3)]
sage: l = P.birational_free_labelling(linear_extension=[1, 3, 2]); l
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b over Rational Field,
{...},
a,
b)
sage: sorted(l[1].items())
[(1, x1), (2, x3), (3, x2)]
sage: l = P.birational_free_labelling(linear_extension=[1, 3, 2], reduced=True, addvars="spam, eggs"); l
(Fraction Field of Multivariate Polynomial Ring in x1, x2, x3, spam, eggs over Rational Field,
{...},
1,
1)
sage: sorted(l[1].items())
[(1, x1), (2, x3), (3, x2)]
sage: l = P.birational_free_labelling(linear_extension=[1, 3, 2], prefix="wut", reduced=True, addvars="spam, eggs"); l
(Fraction Field of Multivariate Polynomial Ring in wut1, wut2, wut3, spam, eggs over Rational Field,
{...},
1,
1)
sage: sorted(l[1].items())
[(1, wut1), (2, wut3), (3, wut2)]
sage: l = P.birational_free_labelling(linear_extension=[1, 3, 2], reduced=False, addvars="spam, eggs"); l
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b, spam, eggs over Rational Field,
{...},
a,
b)
sage: sorted(l[1].items())
[(1, x1), (2, x3), (3, x2)]
sage: l[1][2]
x3
Illustrating the warning about facade::
sage: P = Poset({1: [2, 3]}, facade=False)
sage: l = P.birational_free_labelling(linear_extension=[1, 3, 2], reduced=False, addvars="spam, eggs"); l
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b, spam, eggs over Rational Field,
{...},
a,
b)
sage: l[1][2]
Traceback (most recent call last):
...
KeyError: 2
sage: l[1][P(2)]
x3
Another poset::
sage: P = Posets.SSTPoset([2,1])
sage: lext = sorted(P)
sage: l = P.birational_free_labelling(linear_extension=lext, addvars="ohai")
sage: l
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, x4, x5, x6, x7, x8, b, ohai over Rational Field,
{...},
a,
b)
sage: sorted(l[1].items())
[([[1, 1], [2]], x1), ([[1, 1], [3]], x2), ([[1, 2], [2]], x3), ([[1, 2], [3]], x4),
([[1, 3], [2]], x5), ([[1, 3], [3]], x6), ([[2, 2], [3]], x7), ([[2, 3], [3]], x8)]
See :meth:`birational_rowmotion`, :meth:`birational_toggle` and
:meth:`birational_toggles` for more substantial examples of what
one can do with the birational free labelling.
TESTS:
The ``linear_extension`` keyword does not have to be given an
actual linear extension::
sage: P = Posets.ChainPoset(2).product(Posets.ChainPoset(3))
sage: P
Finite lattice containing 6 elements
sage: lex = [(1,0),(0,0),(1,1),(0,1),(1,2),(0,2)]
sage: l = P.birational_free_labelling(linear_extension=lex,
....: prefix="u", reduced=True)
sage: l
(Fraction Field of Multivariate Polynomial Ring in u1, u2, u3, u4, u5, u6 over Rational Field,
{...},
1,
1)
sage: sorted(l[1].items())
[((0, 0), u2),
((0, 1), u4),
((0, 2), u6),
((1, 0), u1),
((1, 1), u3),
((1, 2), u5)]
For comparison, the standard linear extension::
sage: l = P.birational_free_labelling(prefix="u", reduced=True); l
(Fraction Field of Multivariate Polynomial Ring in u1, u2, u3, u4, u5, u6 over Rational Field,
{...},
1,
1)
sage: sorted(l[1].items())
[((0, 0), u1),
((0, 1), u2),
((0, 2), u3),
((1, 0), u4),
((1, 1), u5),
((1, 2), u6)]
If you want your linear extension to be tested for being a
linear extension, just call the ``linear_extension`` method
on the poset::
sage: lex = [(0,0),(0,1),(1,0),(1,1),(0,2),(1,2)]
sage: l = P.birational_free_labelling(linear_extension=P.linear_extension(lex),
....: prefix="u", reduced=True)
sage: l
(Fraction Field of Multivariate Polynomial Ring in u1, u2, u3, u4, u5, u6 over Rational Field,
{...},
1,
1)
sage: sorted(l[1].items())
[((0, 0), u1),
((0, 1), u2),
((0, 2), u5),
((1, 0), u3),
((1, 1), u4),
((1, 2), u6)]
Nonstandard base field::
sage: P = Poset({1: [3], 2: [3,4]})
sage: lex = [1, 2, 4, 3]
sage: l = P.birational_free_labelling(linear_extension=lex,
....: prefix="aaa",
....: base_field=Zmod(13))
sage: l
(Fraction Field of Multivariate Polynomial Ring in a, aaa1, aaa2, aaa3, aaa4, b over Ring of integers modulo 13,
{...},
a,
b)
sage: l[1][4]
aaa3
The empty poset::
sage: P = Poset({})
sage: P.birational_free_labelling(reduced=False, addvars="spam, eggs")
(Fraction Field of Multivariate Polynomial Ring in a, b, spam, eggs over Rational Field,
{},
a,
b)
sage: P.birational_free_labelling(reduced=True, addvars="spam, eggs")
(Fraction Field of Multivariate Polynomial Ring in spam, eggs over Rational Field,
{},
1,
1)
sage: P.birational_free_labelling(reduced=True)
(Multivariate Polynomial Ring in no variables over Rational Field,
{},
1,
1)
sage: P.birational_free_labelling(prefix="zzz")
(Fraction Field of Multivariate Polynomial Ring in a, b over Rational Field,
{},
a,
b)
"""
if base_field is None:
from sage.rings.rational_field import QQ
base_field = QQ
if linear_extension is None:
linear_extension = self.linear_extension()
n = self.cardinality()
varstring = ""
for i in range(1, n + 1):
varstring += prefix + str(i) + ','
if reduced:
varstring = varstring[:-1]
else:
varstring = 'a,' + varstring + 'b'
if addvars:
varstring += ',' + addvars
if len(varstring) > 0 and varstring[0] == ',':
varstring = varstring[1:]
if len(varstring) > 0:
varnum = varstring.count(',') + 1
else:
varnum = 0
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
PR = PolynomialRing(base_field, varstring, varnum)
# Now, ``PR`` is the polynomial ring in `n + 2` indeterminates
# (or more, if ``addvars`` was set; or less, if ``reduced`` is
# ``True``) over ``base_field``.
# The first `n + 2` of these indeterminates are named
# ``a, x1, x2, ..., xn, b`` (if ``reduced`` is ``False``).
# These will label the vertices of `\widehat{P}`.
if reduced:
xs = tuple(PR.gens()[: n])
else:
xs = tuple(PR.gens()[1 : n + 1])
# So ``xs`` is the list ``[x1, x2, ..., xn]``.
if not reduced:
a = PR.gens()[0]
b = PR.gens()[n + 1]
else:
a = PR.one()
b = PR.one()
# So ``a`` and ``b`` are the labels at `0` and `1`.
FF = PR.fraction_field()
# ``FF`` is the field of rational functions.
dct = {self(p): xs[i] for (i, p) in enumerate(linear_extension)}
return (FF, dct, a, b)
def birational_toggle(self, v, labelling):
r"""
Return the result of applying the birational `v`-toggle `T_v`
to the `\mathbf{K}`-labelling ``labelling`` of the poset ``self``.
See the documentation of :meth:`birational_free_labelling`
for a definition of this toggle and of `\mathbf{K}`-labellings as
well as an explanation of how `\mathbf{K}`-labellings are to be
encoded to be understood by Sage. This implementation allows
`\mathbf{K}` to be a semifield, not just a field. The birational
`v`-toggle is only a rational map, so an exception (most
likely, ``ZeroDivisionError``) will be thrown if the
denominator is zero.
INPUT:
- ``v`` -- an element of ``self`` (must have ``self`` as
parent if ``self`` is a ``facade=False`` poset)
- ``labelling`` -- a `\mathbf{K}`-labelling of ``self`` in the
sense as defined in the documentation of
:meth:`birational_free_labelling`
OUTPUT:
The `\mathbf{K}`-labelling `T_v f` of ``self``, where `f` is
``labelling``.
EXAMPLES:
Let us start with the birational free labelling of the
"V"-poset (the three-element poset with Hasse diagram looking
like a "V")::
sage: V = Poset({1: [2, 3]})
sage: s = V.birational_free_labelling(); s
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b over Rational Field,
{...},
a,
b)
sage: sorted(s[1].items())
[(1, x1), (2, x2), (3, x3)]
The image of `s` under the `1`-toggle `T_1` is::
sage: s1 = V.birational_toggle(1, s); s1
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b over Rational Field,
{...},
a,
b)
sage: sorted(s1[1].items())
[(1, a*x2*x3/(x1*x2 + x1*x3)), (2, x2), (3, x3)]
Now let us apply the `2`-toggle `T_2` (to the old ``s``)::
sage: s2 = V.birational_toggle(2, s); s2
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b over Rational Field,
{...},
a,
b)
sage: sorted(s2[1].items())
[(1, x1), (2, x1*b/x2), (3, x3)]
On the other hand, we can also apply `T_2` to the image of `s`
under `T_1`::
sage: s12 = V.birational_toggle(2, s1); s12
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, b over Rational Field,
{...},
a,
b)
sage: sorted(s12[1].items())
[(1, a*x2*x3/(x1*x2 + x1*x3)), (2, a*x3*b/(x1*x2 + x1*x3)), (3, x3)]
Each toggle is an involution::
sage: all( V.birational_toggle(i, V.birational_toggle(i, s)) == s
....: for i in V )
True
We can also start with a less generic labelling::
sage: t = (QQ, {1: 3, 2: 6, 3: 7}, 2, 10)
sage: t1 = V.birational_toggle(1, t); t1
(Rational Field, {...}, 2, 10)
sage: sorted(t1[1].items())
[(1, 28/13), (2, 6), (3, 7)]
sage: t13 = V.birational_toggle(3, t1); t13
(Rational Field, {...}, 2, 10)
sage: sorted(t13[1].items())
[(1, 28/13), (2, 6), (3, 40/13)]
However, labellings have to be sufficiently generic, lest
denominators vanish::
sage: t = (QQ, {1: 3, 2: 5, 3: -5}, 1, 15)
sage: t1 = V.birational_toggle(1, t)
Traceback (most recent call last):
...
ZeroDivisionError: rational division by zero
We don't get into zero-division issues in the tropical
semiring (unless the zero of the tropical semiring appears
in the labelling)::
sage: TT = TropicalSemiring(QQ)
sage: t = (TT, {1: TT(2), 2: TT(4), 3: TT(1)}, TT(6), TT(0))
sage: t1 = V.birational_toggle(1, t); t1
(Tropical semiring over Rational Field, {...}, 6, 0)
sage: sorted(t1[1].items())
[(1, 8), (2, 4), (3, 1)]
sage: t12 = V.birational_toggle(2, t1); t12
(Tropical semiring over Rational Field, {...}, 6, 0)
sage: sorted(t12[1].items())
[(1, 8), (2, 4), (3, 1)]
sage: t123 = V.birational_toggle(3, t12); t123
(Tropical semiring over Rational Field, {...}, 6, 0)
sage: sorted(t123[1].items())
[(1, 8), (2, 4), (3, 7)]
We turn to more interesting posets. Here is the `6`-element
poset arising from the weak order on `S_3`::
sage: P = Posets.SymmetricGroupWeakOrderPoset(3)
sage: sorted(list(P))
['123', '132', '213', '231', '312', '321']
sage: t = (TT, {'123': TT(4), '132': TT(2), '213': TT(3), '231': TT(1), '321': TT(1), '312': TT(2)}, TT(7), TT(1))
sage: t1 = P.birational_toggle('123', t); t1
(Tropical semiring over Rational Field, {...}, 7, 1)
sage: sorted(t1[1].items())
[('123', 6), ('132', 2), ('213', 3), ('231', 1), ('312', 2), ('321', 1)]
sage: t13 = P.birational_toggle('213', t1); t13
(Tropical semiring over Rational Field, {...}, 7, 1)
sage: sorted(t13[1].items())
[('123', 6), ('132', 2), ('213', 4), ('231', 1), ('312', 2), ('321', 1)]
Let us verify on this example some basic properties of
toggles. First of all, again let us check that `T_v` is an
involution for every `v`::
sage: all( P.birational_toggle(v, P.birational_toggle(v, t)) == t
....: for v in P )
True
Furthermore, two toggles `T_v` and `T_w` commute unless
one of `v` or `w` covers the other::
sage: all( P.covers(v, w) or P.covers(w, v)
....: or P.birational_toggle(v, P.birational_toggle(w, t))
....: == P.birational_toggle(w, P.birational_toggle(v, t))
....: for v in P for w in P )
True
TESTS:
Setting ``facade`` to ``False`` does not break
``birational_toggle``::
sage: P = Poset({'x': ['y', 'w'], 'y': ['z'], 'w': ['z']}, facade=False)
sage: lex = ['x', 'y', 'w', 'z']
sage: t = P.birational_free_labelling(linear_extension=lex)
sage: all( P.birational_toggle(v, P.birational_toggle(v, t)) == t
....: for v in P )
True
sage: t4 = P.birational_toggle(P('z'), t); t4
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, x4, b over Rational Field,
{...},
a,
b)
sage: t4[1][P('x')]
x1
sage: t4[1][P('y')]
x2
sage: t4[1][P('w')]
x3
sage: t4[1][P('z')]
(x2*b + x3*b)/x4
The one-element poset::
sage: P = Poset({8: []})
sage: t = P.birational_free_labelling()
sage: t8 = P.birational_toggle(8, t); t8
(Fraction Field of Multivariate Polynomial Ring in a, x1, b over Rational Field,
{...},
a,
b)
sage: t8[1][8]
a*b/x1
"""
FF = labelling[0] # base field
a = labelling[2] # label at `0 \in \widehat{P}`
b = labelling[3]
newdict = labelling[1].copy()
# Construct the harmonic sum ``x`` of the labels at the
# elements covering ``v``:
uppers = self.upper_covers(v)
if len(uppers) == 0:
x = FF.one() / b
else:
x = FF.sum(FF.one() / newdict[j] for j in uppers)
# ``FF.sum``, not ``sum``, see trac #15591.
x = FF.one() / x
# Construct the sum ``y`` of the labels at the elements
# covered by ``v``:
lowers = self.lower_covers(v)
if len(lowers) == 0:
y = a
else:
y = FF.sum(newdict[j] for j in lowers)
# Now, transform the label at v:
newdict[v] = x * y / newdict[v]
return (FF, newdict, a, b)
def birational_toggles(self, vs, labelling):
r"""
Return the result of applying a sequence of birational
toggles (specified by ``vs``) to the `\mathbf{K}`-labelling
``labelling`` of the poset ``self``.
See the documentation of :meth:`birational_free_labelling`
for a definition of birational toggles and `\mathbf{K}`-labellings
and for an explanation of how `\mathbf{K}`-labellings are to be
encoded to be understood by Sage. This implementation allows
`\mathbf{K}` to be a semifield, not just a field. The birational
`v`-toggle is only a rational map, so an exception (most
likely, ``ZeroDivisionError``) will be thrown if the
denominator is zero.
INPUT:
- ``vs`` -- an iterable comprising elements of ``self``
(which must have ``self`` as parent if ``self`` is a
``facade=False`` poset)
- ``labelling`` -- a `\mathbf{K}`-labelling of ``self`` in the
sense as defined in the documentation of
:meth:`birational_free_labelling`
OUTPUT:
The `\mathbf{K}`-labelling `T_{v_n} T_{v_{n-1}} \cdots T_{v_1} f`
of ``self``, where `f` is ``labelling`` and
`(v_1, v_2, \ldots, v_n)` is ``vs`` (written as list).
EXAMPLES::
sage: P = Posets.SymmetricGroupBruhatOrderPoset(3)
sage: sorted(list(P))
['123', '132', '213', '231', '312', '321']
sage: TT = TropicalSemiring(ZZ)
sage: t = (TT, {'123': TT(4), '132': TT(2), '213': TT(3), '231': TT(1), '321': TT(1), '312': TT(2)}, TT(7), TT(1))
sage: tA = P.birational_toggles(['123', '231', '312'], t); tA
(Tropical semiring over Integer Ring, {...}, 7, 1)
sage: sorted(tA[1].items())
[('123', 6), ('132', 2), ('213', 3), ('231', 2), ('312', 1), ('321', 1)]
sage: tAB = P.birational_toggles(['132', '213', '321'], tA); tAB
(Tropical semiring over Integer Ring, {...}, 7, 1)
sage: sorted(tAB[1].items())
[('123', 6), ('132', 6), ('213', 5), ('231', 2), ('312', 1), ('321', 1)]
sage: P = Poset({1: [2, 3], 2: [4], 3: [4]})
sage: Qx = PolynomialRing(QQ, 'x').fraction_field()
sage: x = Qx.gen()
sage: t = (Qx, {1: 1, 2: x, 3: (x+1)/x, 4: x^2}, 1, 1)
sage: t1 = P.birational_toggles((i for i in range(1, 5)), t); t1
(Fraction Field of Univariate Polynomial Ring in x over Rational Field,
{...},
1,
1)
sage: sorted(t1[1].items())
[(1, (x^2 + x)/(x^2 + x + 1)), (2, (x^3 + x^2)/(x^2 + x + 1)), (3, x^4/(x^2 + x + 1)), (4, 1)]
sage: t2 = P.birational_toggles(reversed(range(1, 5)), t)
sage: sorted(t2[1].items())
[(1, 1/x^2), (2, (x^2 + x + 1)/x^4), (3, (x^2 + x + 1)/(x^3 + x^2)), (4, (x^2 + x + 1)/x^3)]
Facade set to ``False`` works::
sage: P = Poset({'x': ['y', 'w'], 'y': ['z'], 'w': ['z']}, facade=False)
sage: lex = ['x', 'y', 'w', 'z']
sage: t = P.birational_free_labelling(linear_extension=lex)
sage: sorted(P.birational_toggles([P('x'), P('y')], t)[1].items())
[(x, a*x2*x3/(x1*x2 + x1*x3)), (y, a*x3*x4/(x1*x2 + x1*x3)), (w, x3), (z, x4)]
"""
l = labelling
for v in vs:
l = self.birational_toggle(v, l)
return l
def birational_rowmotion(self, labelling):
r"""
Return the result of applying birational rowmotion to the
`\mathbf{K}`-labelling ``labelling`` of the poset ``self``.
See the documentation of :meth:`birational_free_labelling`
for a definition of birational rowmotion and
`\mathbf{K}`-labellings and for an explanation of how
`\mathbf{K}`-labellings are to be encoded to be understood
by Sage. This implementation allows `\mathbf{K}` to be a
semifield, not just a field. Birational rowmotion is only a
rational map, so an exception (most likely, ``ZeroDivisionError``)
will be thrown if the denominator is zero.
INPUT:
- ``labelling`` -- a `\mathbf{K}`-labelling of ``self`` in the
sense as defined in the documentation of
:meth:`birational_free_labelling`
OUTPUT:
The image of the `\mathbf{K}`-labelling `f` under birational
rowmotion.
EXAMPLES::
sage: P = Poset({1: [2, 3], 2: [4], 3: [4]})
sage: lex = [1, 2, 3, 4]
sage: t = P.birational_free_labelling(linear_extension=lex); t
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, x4, b over Rational Field,
{...},
a,
b)
sage: sorted(t[1].items())
[(1, x1), (2, x2), (3, x3), (4, x4)]
sage: t = P.birational_rowmotion(t); t
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, x4, b over Rational Field,
{...},
a,
b)
sage: sorted(t[1].items())
[(1, a*b/x4), (2, (x1*x2*b + x1*x3*b)/(x2*x4)),
(3, (x1*x2*b + x1*x3*b)/(x3*x4)), (4, (x2*b + x3*b)/x4)]
A result of [GR13]_ states that applying birational rowmotion
`n+m` times to a `\mathbf{K}`-labelling `f` of the poset
`[n] \times [m]` gives back `f`. Let us check this::
sage: def test_rectangle_periodicity(n, m, k):
....: P = Posets.ChainPoset(n).product(Posets.ChainPoset(m))
....: t0 = P.birational_free_labelling(P)
....: t = t0
....: for i in range(k):
....: t = P.birational_rowmotion(t)
....: return t == t0
sage: test_rectangle_periodicity(2, 2, 4)
True
sage: test_rectangle_periodicity(2, 2, 2)
False
sage: test_rectangle_periodicity(2, 3, 5) # long time
True
While computations with the birational free labelling quickly
run out of memory due to the complexity of the rational
functions involved, it is computationally cheap to check
properties of birational rowmotion on examples in the tropical
semiring::
sage: def test_rectangle_periodicity_tropical(n, m, k):
....: P = Posets.ChainPoset(n).product(Posets.ChainPoset(m))
....: TT = TropicalSemiring(ZZ)
....: t0 = (TT, {v: TT(floor(random()*100)) for v in P}, TT(0), TT(124))
....: t = t0
....: for i in range(k):
....: t = P.birational_rowmotion(t)
....: return t == t0
sage: test_rectangle_periodicity_tropical(7, 6, 13)
True
Tropicalization is also what relates birational rowmotion to
classical rowmotion on order ideals. In fact, if `T` denotes
the :class:`tropical semiring
<sage.rings.semirings.tropical_semiring.TropicalSemiring>` of
`\ZZ` and `P` is a finite poset, then we can define an embedding
`\phi` from the set `J(P)` of all order ideals of `P` into the
set `T^{\widehat{P}}` of all `T`-labellings of `P` by sending
every `I \in J(P)` to the indicator function of `I` extended by
the value `1` at the element `0` and the value `0` at the
element `1`. This map `\phi` has the property that
`R \circ \phi = \phi \circ r`, where `R` denotes birational
rowmotion, and `r` denotes :meth:`classical rowmotion <rowmotion>`
on `J(P)`. An example::
sage: P = Posets.IntegerPartitions(5)
sage: TT = TropicalSemiring(ZZ)
sage: def indicator_labelling(I):
....: # send order ideal `I` to a `T`-labelling of `P`.
....: dct = {v: TT(v in I) for v in P}
....: return (TT, dct, TT(1), TT(0))
sage: all(indicator_labelling(P.rowmotion(I))
....: == P.birational_rowmotion(indicator_labelling(I))
....: for I in P.order_ideals_lattice(facade=True))
True
TESTS:
Facade set to false works::
sage: P = Poset({1: [2, 3], 2: [4], 3: [4]}, facade=False)
sage: lex = [1, 2, 3, 4]
sage: t = P.birational_free_labelling(linear_extension=lex); t
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, x4, b over Rational Field,
{...},
a,
b)
sage: t = P.birational_rowmotion(t); t
(Fraction Field of Multivariate Polynomial Ring in a, x1, x2, x3, x4, b over Rational Field,
{...},
a,
b)
sage: t[1][P(2)]
(x1*x2*b + x1*x3*b)/(x2*x4)
sage: t = P.birational_rowmotion(t)
sage: t[1][P(2)]
a*b/x3
"""
l = labelling
for v in reversed(self.linear_extension()):
l = self.birational_toggle(v, l)
return l
def panyushev_orbits(self, element_constructor = set):
r"""
Return the Panyushev orbits of antichains in ``self``.
The Panyushev orbit of an antichain is its orbit under
Panyushev complementation (see
:meth:`panyushev_complement`).
INPUT:
- ``element_constructor`` (defaults to ``set``) -- a type
constructor (``set``, ``tuple``, ``list``, ``frozenset``,
``iter``, etc.) which is to be applied to the antichains
before they are returned.
OUTPUT:
- the partition of the set of all antichains of ``self`` into
orbits under Panyushev complementation. This is returned as
a list of lists ``L`` such that for each ``L`` and ``i``,
cyclically:
``self.order_ideal_complement_generators(L[i]) == L[i+1]``.
The entries ``L[i]`` are sets by default, but depending on
the optional keyword variable ``element_constructor``
they can also be tuples, lists etc.
EXAMPLES::
sage: P = Poset( ( [1,2,3], [ [1,3], [2,3] ] ) )
sage: P.panyushev_orbits()
[[{2}, {1}], [set(), {1, 2}, {3}]]
sage: P.panyushev_orbits(element_constructor=list)
[[[2], [1]], [[], [1, 2], [3]]]
sage: P.panyushev_orbits(element_constructor=frozenset)
[[frozenset({2}), frozenset({1})],
[frozenset(), frozenset({1, 2}), frozenset({3})]]
sage: P.panyushev_orbits(element_constructor=tuple)
[[(2,), (1,)], [(), (1, 2), (3,)]]
sage: P = Poset( {} )
sage: P.panyushev_orbits()
[[set()]]
"""
# TODO: implement a generic function taking a set and
# bijections on this set, and returning the orbits.
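# Repeatedly apply Panyushev complementation to an unvisited antichain until
# it returns to itself, collecting each cycle as one orbit.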
AC = set(self.antichains(element_constructor = frozenset))
orbits = []
while AC:
A = AC.pop()
orbit = [ A ]
while True:
A = frozenset(self.order_ideal_complement_generators(A))
if A not in AC: break
orbit.append( A )
AC.remove( A )
orbits.append([element_constructor(_) for _ in orbit])
return orbits
def rowmotion_orbits(self, element_constructor = set):
r"""
Return the rowmotion orbits of order ideals in ``self``.
The rowmotion orbit of an order ideal is its orbit under
rowmotion (see :meth:`rowmotion`).
INPUT:
- ``element_constructor`` (defaults to ``set``) -- a type
constructor (``set``, ``tuple``, ``list``, ``frozenset``,
``iter``, etc.) which is to be applied to the order ideals
before they are returned.
OUTPUT:
- the partition of the set of all order ideals of ``self``
into orbits under rowmotion. This is returned as
a list of lists ``L`` such that for each ``L`` and ``i``,
cyclically: ``self.rowmotion(L[i]) == L[i+1]``.
The entries ``L[i]`` are sets by default, but depending on
the optional keyword variable ``element_constructor``
they can also be tuples, lists etc.
EXAMPLES::
sage: P = Poset( {1: [2, 3], 2: [], 3: [], 4: [2]} )
sage: sorted(len(o) for o in P.rowmotion_orbits())
[3, 5]
sage: sorted(P.rowmotion_orbits(element_constructor=list))
[[[1, 3], [4], [1], [4, 1, 3], [4, 1, 2]], [[4, 1], [4, 1, 2, 3], []]]
sage: sorted(P.rowmotion_orbits(element_constructor=tuple))
[[(1, 3), (4,), (1,), (4, 1, 3), (4, 1, 2)], [(4, 1), (4, 1, 2, 3), ()]]
sage: P = Poset({})
sage: sorted(P.rowmotion_orbits(element_constructor=tuple))
[[()]]
"""
pan_orbits = self.panyushev_orbits(element_constructor = list)
return [[element_constructor(self.order_ideal(oideal)) for oideal in orbit] for orbit in pan_orbits]
def toggling_orbits(self, vs, element_constructor = set):
r"""
Return the orbits of order ideals in ``self`` under the
operation of toggling the vertices ``vs[0], vs[1], ...``
in this order.
See :meth:`order_ideal_toggle` for a definition of toggling.
.. WARNING::
The orbits are those under the composition of toggles,
*not* under the single toggles themselves. Thus, for
example, if ``vs == [1,2]``, then the orbits have the
form `(I, T_2 T_1 I, T_2 T_1 T_2 T_1 I, \ldots)`
(where `I` denotes an order ideal and `T_i` means
toggling at `i`) rather than
`(I, T_1 I, T_2 T_1 I, T_1 T_2 T_1 I, \ldots)`.
INPUT:
- ``vs``: a list (or other iterable) of elements of ``self``
(but since the output depends on the order, sets should
not be used as ``vs``).
OUTPUT:
- a partition of the order ideals of ``self``, as a list of
sets ``L`` such that for each ``L`` and ``i``, cyclically:
``self.order_ideal_toggles(L[i], vs) == L[i+1]``.
EXAMPLES::
sage: P = Poset( {1: [2, 4], 2: [], 3: [4], 4: []} )
sage: sorted(len(o) for o in P.toggling_orbits([1, 2]))
[2, 3, 3]
sage: P = Poset( {1: [3], 2: [1, 4], 3: [], 4: [3]} )
sage: sorted(len(o) for o in P.toggling_orbits((1, 2, 4, 3)))
[3, 3]
"""
# TODO: implement a generic function taking a set and
# bijections on this set, and returning the orbits.
OI = set(self.order_ideals_lattice(facade=True))
orbits = []
while OI:
A = OI.pop()
orbit = [ A ]
while True:
A = self.order_ideal_toggles(A, vs)
if A not in OI: break
orbit.append( A )
OI.remove( A )
orbits.append([element_constructor(_) for _ in orbit])
return orbits
def panyushev_orbit_iter(self, antichain, element_constructor=set, stop=True, check=True):
r"""
Iterate over the Panyushev orbit of an antichain
``antichain`` of ``self``.
The Panyushev orbit of an antichain is its orbit under
Panyushev complementation (see
:meth:`panyushev_complement`).
INPUT:
- ``antichain`` -- an antichain of ``self``, given as an
iterable.
- ``element_constructor`` (defaults to ``set``) -- a type
constructor (``set``, ``tuple``, ``list``, ``frozenset``,
``iter``, etc.) which is to be applied to the antichains
before they are yielded.
- ``stop`` -- a Boolean (default: ``True``) determining
whether the iterator should stop once it completes its
cycle (this happens when it is set to ``True``) or go on
forever (this happens when it is set to ``False``).
- ``check`` -- a Boolean (default: ``True``) determining
whether ``antichain`` should be checked for being an
antichain.
OUTPUT:
- an iterator over the orbit of the antichain ``antichain``
under Panyushev complementation. This iterator `I` has the
property that ``I[0] == antichain`` and each `i` satisfies
``self.order_ideal_complement_generators(I[i]) == I[i+1]``,
where ``I[i+1]`` has to be understood as ``I[0]`` if it is
undefined.
The entries ``I[i]`` are sets by default, but depending on
the optional keyword variable ``element_constructor``
they can also be tuples, lists etc.
EXAMPLES::
sage: P = Poset( ( [1,2,3], [ [1,3], [2,3] ] ) )
sage: list(P.panyushev_orbit_iter(set([1, 2])))
[{1, 2}, {3}, set()]
sage: list(P.panyushev_orbit_iter([1, 2]))
[{1, 2}, {3}, set()]
sage: list(P.panyushev_orbit_iter([2, 1]))
[{1, 2}, {3}, set()]
sage: list(P.panyushev_orbit_iter(set([1, 2]), element_constructor=list))
[[1, 2], [3], []]
sage: list(P.panyushev_orbit_iter(set([1, 2]), element_constructor=frozenset))
[frozenset({1, 2}), frozenset({3}), frozenset()]
sage: list(P.panyushev_orbit_iter(set([1, 2]), element_constructor=tuple))
[(1, 2), (3,), ()]
sage: P = Poset( {} )
sage: list(P.panyushev_orbit_iter([]))
[set()]
sage: P = Poset({ 1: [2, 3], 2: [4], 3: [4], 4: [] })
sage: Piter = P.panyushev_orbit_iter([2], stop=False)
sage: next(Piter)
{2}
sage: next(Piter)
{3}
sage: next(Piter)
{2}
sage: next(Piter)
{3}
"""
# TODO: implement a generic function taking a set and
# bijections on this set, and returning an orbit of a given
# element.
if check:
if not self.is_antichain_of_poset(antichain):
raise ValueError("the given antichain is not an antichain")
starter = set(antichain) # sanitize input
yield element_constructor(starter)
next = starter
if stop:
while True:
next = self.order_ideal_complement_generators(next)
if next == starter:
break
yield element_constructor(next)
else:
while True:
next = self.order_ideal_complement_generators(next)
yield element_constructor(next)
def rowmotion_orbit_iter(self, oideal, element_constructor=set, stop=True, check=True):
r"""
Iterate over the rowmotion orbit of an order ideal
``oideal`` of ``self``.
The rowmotion orbit of an order ideal is its orbit under
rowmotion (see :meth:`rowmotion`).
INPUT:
- ``oideal`` -- an order ideal of ``self``, given as an
iterable.
- ``element_constructor`` (defaults to ``set``) -- a type
constructor (``set``, ``tuple``, ``list``, ``frozenset``,
``iter``, etc.) which is to be applied to the order
ideals before they are yielded.
- ``stop`` -- a Boolean (default: ``True``) determining
whether the iterator should stop once it completes its
cycle (this happens when it is set to ``True``) or go on
forever (this happens when it is set to ``False``).
- ``check`` -- a Boolean (default: ``True``) determining
whether ``oideal`` should be checked for being an
order ideal.
OUTPUT:
- an iterator over the orbit of the order ideal ``oideal``
under rowmotion. This iterator `I` has the property that
``I[0] == oideal`` and that every `i` satisfies
``self.rowmotion(I[i]) == I[i+1]``, where ``I[i+1]`` has
to be understood as ``I[0]`` if it is undefined.
The entries ``I[i]`` are sets by default, but depending on
the optional keyword variable ``element_constructor``
they can also be tuples, lists etc.
EXAMPLES::
sage: P = Poset( ( [1,2,3], [ [1,3], [2,3] ] ) )
sage: list(P.rowmotion_orbit_iter(set([1, 2])))
[{1, 2}, {1, 2, 3}, set()]
sage: list(P.rowmotion_orbit_iter([1, 2]))
[{1, 2}, {1, 2, 3}, set()]
sage: list(P.rowmotion_orbit_iter([2, 1]))
[{1, 2}, {1, 2, 3}, set()]
sage: list(P.rowmotion_orbit_iter(set([1, 2]), element_constructor=list))
[[1, 2], [1, 2, 3], []]
sage: list(P.rowmotion_orbit_iter(set([1, 2]), element_constructor=frozenset))
[frozenset({1, 2}), frozenset({1, 2, 3}), frozenset()]
sage: list(P.rowmotion_orbit_iter(set([1, 2]), element_constructor=tuple))
[(1, 2), (1, 2, 3), ()]
sage: P = Poset( {} )
sage: list(P.rowmotion_orbit_iter([]))
[set()]
sage: P = Poset({ 1: [2, 3], 2: [4], 3: [4], 4: [] })
sage: Piter = P.rowmotion_orbit_iter([1, 2, 3], stop=False)
sage: next(Piter)
{1, 2, 3}
sage: next(Piter)
{1, 2, 3, 4}
sage: next(Piter)
set()
sage: next(Piter)
{1}
sage: next(Piter)
{1, 2, 3}
sage: P = Poset({ 1: [4], 2: [4, 5], 3: [5] })
sage: list(P.rowmotion_orbit_iter([1, 2], element_constructor=list))
[[1, 2], [1, 2, 3, 4], [2, 3, 5], [1], [2, 3], [1, 2, 3, 5], [1, 2, 4], [3]]
"""
# TODO: implement a generic function taking a set and
# bijections on this set, and returning an orbit of a given
# element.
if check:
if not self.is_order_ideal(oideal):
raise ValueError("the given order ideal is not an order ideal")
starter = set(oideal) # sanitize input
yield element_constructor(starter)
next = starter
if stop:
while True:
next = self.rowmotion(next)
if next == starter:
break
yield element_constructor(next)
else:
while True:
next = self.rowmotion(next)
yield element_constructor(next)
def toggling_orbit_iter(self, vs, oideal, element_constructor=set, stop=True, check=True):
r"""
Iterate over the orbit of an order ideal ``oideal`` of
``self`` under the operation of toggling the vertices
``vs[0], vs[1], ...`` in this order.
See :meth:`order_ideal_toggle` for a definition of toggling.
.. WARNING::
The orbit is that under the composition of toggles,
*not* under the single toggles themselves. Thus, for
example, if ``vs == [1,2]``, then the orbit has the
form `(I, T_2 T_1 I, T_2 T_1 T_2 T_1 I, \ldots)`
(where `I` denotes ``oideal`` and `T_i` means
toggling at `i`) rather than
`(I, T_1 I, T_2 T_1 I, T_1 T_2 T_1 I, \ldots)`.
INPUT:
- ``vs``: a list (or other iterable) of elements of ``self``
(but since the output depends on the order, sets should
not be used as ``vs``).
- ``oideal`` -- an order ideal of ``self``, given as an
iterable.
- ``element_constructor`` (defaults to ``set``) -- a type
constructor (``set``, ``tuple``, ``list``, ``frozenset``,
``iter``, etc.) which is to be applied to the order
ideals before they are yielded.
- ``stop`` -- a Boolean (default: ``True``) determining
whether the iterator should stop once it completes its
cycle (this happens when it is set to ``True``) or go on
forever (this happens when it is set to ``False``).
- ``check`` -- a Boolean (default: ``True``) determining
whether ``oideal`` should be checked for being an
order ideal.
OUTPUT:
- an iterator over the orbit of the order ideal ``oideal``
under toggling the vertices in the list ``vs`` in this
order. This iterator `I` has the property that
``I[0] == oideal`` and that every `i` satisfies
``self.order_ideal_toggles(I[i], vs) == I[i+1]``, where
``I[i+1]`` has to be understood as ``I[0]`` if it is
undefined.
The entries ``I[i]`` are sets by default, but depending on
the optional keyword variable ``element_constructor``
they can also be tuples, lists etc.
EXAMPLES::
sage: P = Poset( ( [1,2,3], [ [1,3], [2,3] ] ) )
sage: list(P.toggling_orbit_iter([1, 3, 1], set([1, 2])))
[{1, 2}]
sage: list(P.toggling_orbit_iter([1, 2, 3], set([1, 2])))
[{1, 2}, set(), {1, 2, 3}]
sage: list(P.toggling_orbit_iter([3, 2, 1], set([1, 2])))
[{1, 2}, {1, 2, 3}, set()]
sage: list(P.toggling_orbit_iter([3, 2, 1], set([1, 2]), element_constructor=list))
[[1, 2], [1, 2, 3], []]
sage: list(P.toggling_orbit_iter([3, 2, 1], set([1, 2]), element_constructor=frozenset))
[frozenset({1, 2}), frozenset({1, 2, 3}), frozenset()]
sage: list(P.toggling_orbit_iter([3, 2, 1], set([1, 2]), element_constructor=tuple))
[(1, 2), (1, 2, 3), ()]
sage: list(P.toggling_orbit_iter([3, 2, 1], [2, 1], element_constructor=tuple))
[(1, 2), (1, 2, 3), ()]
sage: P = Poset( {} )
sage: list(P.toggling_orbit_iter([], []))
[set()]
sage: P = Poset({ 1: [2, 3], 2: [4], 3: [4], 4: [] })
sage: Piter = P.toggling_orbit_iter([1, 2, 4, 3], [1, 2, 3], stop=False)
sage: next(Piter)
{1, 2, 3}
sage: next(Piter)
{1}
sage: next(Piter)
set()
sage: next(Piter)
{1, 2, 3}
sage: next(Piter)
{1}
"""
# TODO: implement a generic function taking a set and
# bijections on this set, and returning an orbit of a given
# element.
if check:
if not self.is_order_ideal(oideal):
raise ValueError("the given order ideal is not an order ideal")
starter = set(oideal) # sanitize input
yield element_constructor(starter)
next = starter
if stop:
while True:
next = self.order_ideal_toggles(next, vs)
if next == starter:
break
yield element_constructor(next)
else:
while True:
next = self.order_ideal_toggles(next, vs)
yield element_constructor(next)
def order_ideals_lattice(self, as_ideals=True, facade=None):
r"""
Return the lattice of order ideals of a poset ``self``,
ordered by inclusion.
The lattice of order ideals of a poset `P` is usually
denoted by `J(P)`. Its underlying set is the set of order
ideals of `P`, and its partial order is given by
inclusion.
The order ideals of `P` are in a canonical bijection
with the antichains of `P`. The bijection maps every
order ideal to the antichain formed by its maximal
elements. By setting the ``as_ideals`` keyword variable to
``False``, one can make this method apply this bijection
before returning the lattice.
INPUT:
- ``as_ideals`` -- Boolean, if ``True`` (default) returns
a poset on the set of order ideals, otherwise on the set
of antichains
- ``facade`` -- Boolean or ``None`` (default). Whether to
return a facade lattice or not. By default return facade
lattice if the poset is a facade poset.
EXAMPLES::
sage: P = Posets.PentagonPoset()
sage: P.cover_relations()
[[0, 1], [0, 2], [1, 4], [2, 3], [3, 4]]
sage: J = P.order_ideals_lattice(); J
Finite lattice containing 8 elements
sage: list(J)
[{}, {0}, {0, 2}, {0, 2, 3}, {0, 1}, {0, 1, 2}, {0, 1, 2, 3}, {0, 1, 2, 3, 4}]
As a lattice on antichains::
sage: J2 = P.order_ideals_lattice(False); J2
Finite lattice containing 8 elements
sage: list(J2)
[(0,), (1, 2), (1, 3), (1,), (2,), (3,), (4,), ()]
TESTS::
sage: J = Posets.DiamondPoset(4, facade = True).order_ideals_lattice(); J
Finite lattice containing 6 elements
sage: list(J)
[{}, {0}, {0, 2}, {0, 1}, {0, 1, 2}, {0, 1, 2, 3}]
sage: J.cover_relations()
[[{}, {0}], [{0}, {0, 2}], [{0}, {0, 1}], [{0, 2}, {0, 1, 2}], [{0, 1}, {0, 1, 2}], [{0, 1, 2}, {0, 1, 2, 3}]]
sage: P = Poset({1:[2]})
sage: J_facade = P.order_ideals_lattice()
sage: J_nonfacade = P.order_ideals_lattice(facade=False)
sage: type(J_facade[0]) == type(J_nonfacade[0])
False
"""
from sage.combinat.posets.lattices import LatticePoset
if facade is None:
facade = self._is_facade
if as_ideals:
from sage.misc.misc import attrcall
from sage.sets.set import Set
ideals = [Set(self.order_ideal(antichain))
for antichain in self.antichains()]
return LatticePoset((ideals, attrcall("issubset")),
facade=facade)
else:
from sage.misc.cachefunc import cached_function
antichains = [tuple(a) for a in self.antichains()]
@cached_function
def is_above(a, xb):
return any(self.is_lequal(xa, xb) for xa in a)
def cmp(a, b):
return all(is_above(a, xb) for xb in b)
return LatticePoset((antichains, cmp), facade=facade)
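# Illustrative sketch of the order-ideal -> antichain bijection mentioned in
# the docstring of ``order_ideals_lattice`` above; it is written against a
# plain ``is_lequal`` comparison rather than Sage internals (an assumption of
# this sketch).  The image of an order ideal is the antichain of its maximal
# elements.
def _maximal_elements_sketch(ideal, is_lequal):
    return [x for x in ideal
            if not any(x != y and is_lequal(x, y) for y in ideal)]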
@abstract_method(optional = True)
def antichains(self):
r"""
Return all antichains of ``self``.
EXAMPLES::
sage: A = Posets.PentagonPoset().antichains(); A
Set of antichains of Finite lattice containing 5 elements
sage: list(A)
[[], [0], [1], [1, 2], [1, 3], [2], [3], [4]]
"""
def directed_subsets(self, direction):
r"""
Return the order filters (resp. order ideals) of ``self``, as lists.
If ``direction`` is 'up', returns the order filters (upper sets).
If ``direction`` is 'down', returns the order ideals (lower sets).
INPUT:
- ``direction`` -- 'up' or 'down'
EXAMPLES::
sage: P = Poset((divisors(12), attrcall("divides")), facade=True)
sage: A = P.directed_subsets('up')
sage: sorted(list(A))
[[], [1, 2, 4, 3, 6, 12], [2, 4, 3, 6, 12], [2, 4, 6, 12], [3, 6, 12], [4, 3, 6, 12], [4, 6, 12], [4, 12], [6, 12], [12]]
TESTS::
sage: list(Poset().directed_subsets('up'))
[[]]
"""
if direction != 'up' and direction != 'down':
raise ValueError("Direction must be either 'up' or 'down'.")
return self.antichains().map(lambda elements: self.directed_subset(elements, direction))
|
py | b410724bec6dd4e8dff03340d27172938acef9f4 | # Generated by Django 2.2.18 on 2021-02-25 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mainapp", "0005_auto_20210218_1136"),
]
operations = [
migrations.AddField(
model_name="productcategory",
name="is_active",
field=models.BooleanField(default=True, verbose_name="категория активна"),
),
migrations.AlterField(
model_name="product",
name="description",
field=models.TextField(blank=True, verbose_name="описание продукта"),
),
migrations.AlterField(
model_name="product",
name="name",
field=models.CharField(max_length=128, verbose_name="имя продукта"),
),
migrations.AlterField(
model_name="product",
name="price",
field=models.DecimalField(decimal_places=2, default=0, max_digits=8, verbose_name="цена продукта"),
),
migrations.AlterField(
model_name="product",
name="quantity",
field=models.PositiveIntegerField(default=0, verbose_name="количество на складе"),
),
migrations.AlterField(
model_name="product",
name="short_desc",
field=models.CharField(blank=True, max_length=60, verbose_name="краткое описание продукта"),
),
migrations.AlterField(
model_name="productcategory",
name="description",
field=models.TextField(blank=True, verbose_name="описание"),
),
migrations.AlterField(
model_name="productcategory",
name="name",
field=models.CharField(max_length=64, unique=True, verbose_name="имя"),
),
]
|
py | b41072750370942939844bce8cd311c2fe560bd0 | import os
import bpy
from . import object, mesh, material, camera, light
from .. import logger
def active_object():
return bpy.context.scene.objects.active
def init():
logger.debug('Initializing API')
object._MESH_MAP.clear()
def selected_objects(valid_types=None):
logger.debug('api.selected_objects(%s)', valid_types)
for node in bpy.context.selected_objects:
if valid_types is None:
yield node.name
elif valid_types is not None and node.type in valid_types:
yield node.name
def set_active_object(obj):
bpy.context.scene.objects.active = obj
def scene_name():
return os.path.basename(bpy.data.filepath)
|
py | b41072e606d34332f8c7d7c7e656502e8b6140f2 | """Local Lambda Service that only invokes a function"""
import json
import logging
import io
from flask import Flask, request
from samcli.lib.utils.stream_writer import StreamWriter
from samcli.local.services.base_local_service import BaseLocalService, LambdaOutputParser
from samcli.local.lambdafn.exceptions import FunctionNotFound
from .lambda_error_responses import LambdaErrorResponses
LOG = logging.getLogger(__name__)
class LocalLambdaInvokeService(BaseLocalService):
def __init__(self, lambda_runner, port, host, stderr=None):
"""
Creates a Local Lambda Service that will only response to invoking a function
Parameters
----------
lambda_runner samcli.commands.local.lib.local_lambda.LocalLambdaRunner
The Lambda runner class capable of invoking the function
port int
Optional. port for the service to start listening on
host str
Optional. host to start the service on
stderr io.BaseIO
Optional stream where the stderr from Docker container should be written to
"""
super(LocalLambdaInvokeService, self).__init__(lambda_runner.is_debugging(), port=port, host=host)
self.lambda_runner = lambda_runner
self.stderr = stderr
def create(self):
"""
Creates a Flask Application that can be started.
"""
self._app = Flask(__name__)
path = "/2015-03-31/functions/<function_name>/invocations"
self._app.add_url_rule(
path,
endpoint=path,
view_func=self._invoke_request_handler,
methods=["POST"],
provide_automatic_options=False,
)
# setup request validation before Flask calls the view_func
self._app.before_request(LocalLambdaInvokeService.validate_request)
self._construct_error_handling()
@staticmethod
def validate_request():
"""
Validates the incoming request
The following are invalid
1. The Request data is not json serializable
2. Query Parameters are sent to the endpoint
3. The Request Content-Type is not application/json
4. 'X-Amz-Log-Type' header is not 'None'
5. 'X-Amz-Invocation-Type' header is not 'RequestResponse'
Returns
-------
flask.Response
If the request is not valid a flask Response is returned
None:
If the request passes all validation
"""
flask_request = request
request_data = flask_request.get_data()
if not request_data:
request_data = b"{}"
request_data = request_data.decode("utf-8")
try:
json.loads(request_data)
except ValueError as json_error:
LOG.debug("Request body was not json. Exception: %s", str(json_error))
return LambdaErrorResponses.invalid_request_content(
"Could not parse request body into json: No JSON object could be decoded"
)
if flask_request.args:
LOG.debug("Query parameters are in the request but not supported")
return LambdaErrorResponses.invalid_request_content("Query Parameters are not supported")
request_headers = flask_request.headers
log_type = request_headers.get("X-Amz-Log-Type", "None")
if log_type != "None":
LOG.debug("log-type: %s is not supported. None is only supported.", log_type)
return LambdaErrorResponses.not_implemented_locally(
"log-type: {} is not supported. None is only supported.".format(log_type)
)
invocation_type = request_headers.get("X-Amz-Invocation-Type", "RequestResponse")
if invocation_type != "RequestResponse":
LOG.warning("invocation-type: %s is not supported. RequestResponse is only supported.", invocation_type)
return LambdaErrorResponses.not_implemented_locally(
"invocation-type: {} is not supported. RequestResponse is only supported.".format(invocation_type)
)
def _construct_error_handling(self):
"""
Updates the Flask app with Error Handlers for different Error Codes
"""
self._app.register_error_handler(500, LambdaErrorResponses.generic_service_exception)
self._app.register_error_handler(404, LambdaErrorResponses.generic_path_not_found)
self._app.register_error_handler(405, LambdaErrorResponses.generic_method_not_allowed)
def _invoke_request_handler(self, function_name):
"""
Request Handler for the Local Lambda Invoke path. This method is responsible for understanding the incoming
request and invoking the Local Lambda Function
Parameters
----------
function_name str
Name of the function to invoke
Returns
-------
A Flask Response response object as if it was returned from Lambda
"""
flask_request = request
request_data = flask_request.get_data()
if not request_data:
request_data = b"{}"
request_data = request_data.decode("utf-8")
stdout_stream = io.BytesIO()
stdout_stream_writer = StreamWriter(stdout_stream, self.is_debugging)
try:
self.lambda_runner.invoke(function_name, request_data, stdout=stdout_stream_writer, stderr=self.stderr)
except FunctionNotFound:
LOG.debug("%s was not found to invoke.", function_name)
return LambdaErrorResponses.resource_not_found(function_name)
lambda_response, lambda_logs, is_lambda_user_error_response = LambdaOutputParser.get_lambda_output(
stdout_stream
)
if self.stderr and lambda_logs:
# Write the logs to stderr if available.
self.stderr.write(lambda_logs)
if is_lambda_user_error_response:
return self.service_response(
lambda_response, {"Content-Type": "application/json", "x-amz-function-error": "Unhandled"}, 200
)
return self.service_response(lambda_response, {"Content-Type": "application/json"}, 200)
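# Illustrative client-side sketch (not part of sam-cli) showing a request that
# passes all of the validate_request() rules documented above.  The port (3001,
# the usual default for `sam local start-lambda`) and the function name are
# assumptions -- adjust them to whatever the local service is actually serving.
def _example_invoke():
    import requests  # assumed to be available in the caller's environment
    resp = requests.post(
        "http://127.0.0.1:3001/2015-03-31/functions/HelloWorldFunction/invocations",
        data='{"key": "value"}',                         # body must be valid JSON
        headers={
            "Content-Type": "application/json",
            "X-Amz-Invocation-Type": "RequestResponse",  # only supported invocation type
            "X-Amz-Log-Type": "None",                    # only supported log type
        },
    )
    return resp.status_code, resp.json()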
|
py | b41073333da12bcc95f70581c6a4e50fa94a1c55 | # Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from typing import Any, Callable, Dict, Iterable, List, Optional, Union, ValuesView
from typing import cast as typecast
import json
import logging
import os
from ipaddress import ip_address
from ..constants import Constants
from ..utils import RichStatus, SavedSecret, SecretHandler, SecretInfo
from ..config import Config
from .irresource import IRResource
from .irambassador import IRAmbassador
from .irauth import IRAuth
from .irfilter import IRFilter
from .ircluster import IRCluster
from .irbasemappinggroup import IRBaseMappingGroup
from .irbasemapping import IRBaseMapping
from .irhttpmapping import IRHTTPMapping
from .irhost import IRHost, HostFactory
from .irmappingfactory import MappingFactory
from .irratelimit import IRRateLimit
from .irtls import TLSModuleFactory, IRAmbassadorTLS
from .irlistener import ListenerFactory, IRListener
from .irlogservice import IRLogService, IRLogServiceFactory
from .irtracing import IRTracing
from .irtlscontext import IRTLSContext, TLSContextFactory
from .irserviceresolver import IRServiceResolver, IRServiceResolverFactory, SvcEndpointSet
from ..VERSION import Version, Build
#############################################################################
## ir.py -- the Ambassador Intermediate Representation (IR)
##
## After getting an ambassador.Config, you can create an ambassador.IR. The
## IR is the basis for everything else: you can use it to configure an Envoy
## or to run diagnostics.
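# Illustrative usage sketch (not part of this module).  The secret handler
# flavour and its constructor arguments are assumptions -- check
# ambassador.utils for the real classes before relying on this.
def _example_build_ir(resources, logger):
    from ambassador.utils import NullSecretHandler
    aconf = Config()
    aconf.load_all(resources)                  # resources gathered elsewhere (watt / filesystem)
    secret_handler = NullSecretHandler(logger, None, None, "0")
    ir = IR(aconf, secret_handler=secret_handler)
    return ir.as_dict()                        # or ir.as_json() / ir.features()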
class IR:
ambassador_module: IRAmbassador
ambassador_id: str
ambassador_namespace: str
ambassador_nodename: str
aconf: Config
clusters: Dict[str, IRCluster]
agent_active: bool
agent_service: Optional[str]
agent_origination_ctx: Optional[IRTLSContext]
edge_stack_allowed: bool
file_checker: Callable[[str], bool]
filters: List[IRFilter]
groups: Dict[str, IRBaseMappingGroup]
grpc_services: Dict[str, IRCluster]
hosts: Dict[str, IRHost]
listeners: List[IRListener]
log_services: Dict[str, IRLogService]
ratelimit: Optional[IRRateLimit]
redirect_cleartext_from: Optional[int]
resolvers: Dict[str, IRServiceResolver]
router_config: Dict[str, Any]
saved_resources: Dict[str, IRResource]
saved_secrets: Dict[str, SavedSecret]
secret_handler: SecretHandler
secret_root: str
sidecar_cluster_name: Optional[str]
tls_contexts: Dict[str, IRTLSContext]
tls_module: Optional[IRAmbassadorTLS]
tracing: Optional[IRTracing]
def __init__(self, aconf: Config, secret_handler=None, file_checker=None, logger=None, watch_only=False) -> None:
self.ambassador_id = Config.ambassador_id
self.ambassador_namespace = Config.ambassador_namespace
self.ambassador_nodename = aconf.ambassador_nodename
self.statsd = aconf.statsd
self.logger = logger or logging.getLogger("ambassador.ir")
# We're using setattr since mypy complains about assigning directly to a method.
secret_root = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', "/ambassador")
setattr(self, 'file_checker', file_checker if file_checker is not None else os.path.isfile)
# The secret_handler is _required_.
self.secret_handler = secret_handler
assert self.secret_handler, "Ambassador.IR requires a SecretHandler at initialization"
self.logger.debug("IR __init__:")
self.logger.debug("IR: Version %s built from %s on %s" % (Version, Build.git.commit, Build.git.branch))
self.logger.debug("IR: AMBASSADOR_ID %s" % self.ambassador_id)
self.logger.debug("IR: Namespace %s" % self.ambassador_namespace)
self.logger.debug("IR: Nodename %s" % self.ambassador_nodename)
self.logger.debug("IR: Endpoints %s" % "enabled" if Config.enable_endpoints else "disabled")
self.logger.debug("IR: file checker: %s" % getattr(self, 'file_checker').__name__)
self.logger.debug("IR: secret handler: %s" % type(self.secret_handler).__name__)
# First up: save the Config object. Its source map may be necessary later.
self.aconf = aconf
# Next, we'll want a way to keep track of resources we end up working
# with. It starts out empty.
self.saved_resources = {}
# Also, we have no saved secret stuff yet...
self.saved_secrets = {}
self.secret_info: Dict[str, SecretInfo] = {}
# ...and the initial IR state is empty _except for k8s_status_updates_.
#
# Note that we use a map for clusters, not a list -- the reason is that
# multiple mappings can use the same service, and we don't want multiple
# clusters.
self.breakers = {}
self.clusters = {}
self.filters = []
self.groups = {}
self.grpc_services = {}
self.hosts = {}
# self.k8s_status_updates is handled below.
self.listeners = []
self.log_services = {}
self.outliers = {}
self.ratelimit = None
self.redirect_cleartext_from = None
self.resolvers = {}
self.saved_secrets = {}
self.secret_info = {}
self.services = {}
self.sidecar_cluster_name = None
self.tls_contexts = {}
self.tls_module = None
self.tracing = None
# Copy k8s_status_updates from our aconf.
self.k8s_status_updates = aconf.k8s_status_updates
# Check on the intercept agent and edge stack. Note that the Edge Stack touchfile is _not_
# within $AMBASSADOR_CONFIG_BASE_DIR: it stays in /ambassador no matter what.
self.agent_active = (os.environ.get("AGENT_SERVICE", None) is not None)
self.edge_stack_allowed = os.path.exists('/ambassador/.edge_stack')
self.agent_origination_ctx = None
# OK, time to get this show on the road. First things first: set up the
# Ambassador module.
#
# The Ambassador module is special: it doesn't do anything in its setup() method, but
# instead defers all its heavy lifting to its finalize() method. Why? Because we need
# to create the Ambassador module very early to allow IRResource.lookup() to work, but
# we need to go pull in secrets and such before we can get all the Ambassador-module
# stuff fully set up.
#
# So. First, create the module.
self.ambassador_module = typecast(IRAmbassador, self.save_resource(IRAmbassador(self, aconf)))
# Next, grab whatever information our aconf has about secrets...
self.save_secret_info(aconf)
# ...and then it's on to default TLS stuff, both from the TLS module and from
# any TLS contexts.
#
# XXX This feels like a hack -- shouldn't it be class-wide initialization
# in TLSModule or TLSContext? So far it's the only place we need anything like
# this though.
TLSModuleFactory.load_all(self, aconf)
TLSContextFactory.load_all(self, aconf)
# ...then grab whatever we know about Hosts...
HostFactory.load_all(self, aconf)
# ...then set up for the intercept agent, if that's a thing.
self.agent_init(aconf)
# Finally, finalize all the Host stuff (including the !*@#&!* fallback context).
HostFactory.finalize(self, aconf)
# Now we can finalize the Ambassador module, to tidy up secrets et al. We do this
# here so that secrets and TLS contexts are available.
if not self.ambassador_module.finalize(self, aconf):
# Uhoh.
self.ambassador_module.set_active(False) # This can't be good.
_activity_str = 'watching' if watch_only else 'starting'
_mode_str = 'OSS'
if self.agent_active:
_mode_str = 'Intercept Agent'
elif self.edge_stack_allowed:
_mode_str = 'Edge Stack'
self.logger.debug(f"IR: {_activity_str} {_mode_str}")
# Next up, initialize our IRServiceResolvers...
IRServiceResolverFactory.load_all(self, aconf)
# ...and then we can finalize the agent, if that's a thing.
self.agent_finalize(aconf)
# Once here, if we're only watching, we're done.
if watch_only:
return
# REMEMBER FOR SAVING YOU NEED TO CALL save_resource!
# THIS IS VERY IMPORTANT!
# Save circuit breakers, outliers, and services.
self.breakers = aconf.get_config("CircuitBreaker") or {}
self.outliers = aconf.get_config("OutlierDetection") or {}
self.services = aconf.get_config("service") or {}
# Save tracing, ratelimit, and logging settings.
self.tracing = typecast(IRTracing, self.save_resource(IRTracing(self, aconf)))
self.ratelimit = typecast(IRRateLimit, self.save_resource(IRRateLimit(self, aconf)))
IRLogServiceFactory.load_all(self, aconf)
# After the Ambassador and TLS modules are done, we need to set up the
# filter chains. Note that order of the filters matters. Start with auth,
# since it needs to be able to override everything...
self.save_filter(IRAuth(self, aconf))
# ...then deal with the non-configurable cors filter...
self.save_filter(IRFilter(ir=self, aconf=aconf,
rkey="ir.cors", kind="ir.cors", name="cors",
config={}))
# ...then the ratelimit filter...
if self.ratelimit:
self.save_filter(self.ratelimit, already_saved=True)
# ...and, finally, the barely-configurable router filter.
router_config = {}
if self.tracing:
router_config['start_child_span'] = True
self.save_filter(IRFilter(ir=self, aconf=aconf,
rkey="ir.router", kind="ir.router", name="router", type="decoder",
config=router_config))
# We would handle other modules here -- but guess what? There aren't any.
# At this point ambassador, tls, and the deprecated auth module are all there
# are, and they're handled above. So. At this point go sort out all the Mappings
ListenerFactory.load_all(self, aconf)
MappingFactory.load_all(self, aconf)
self.walk_saved_resources(aconf, 'add_mappings')
TLSModuleFactory.finalize(self, aconf)
ListenerFactory.finalize(self, aconf)
MappingFactory.finalize(self, aconf)
# At this point we should know the full set of clusters, so we can normalize
# any long cluster names.
collisions: Dict[str, List[str]] = {}
for name in sorted(self.clusters.keys()):
if len(name) > 60:
# Too long.
short_name = name[0:40]
collision_list = collisions.setdefault(short_name, [])
collision_list.append(name)
for short_name in sorted(collisions.keys()):
name_list = collisions[short_name]
i = 0
for name in sorted(name_list):
mangled_name = "%s-%d" % (short_name, i)
i += 1
self.logger.debug("%s => %s" % (name, mangled_name))
self.clusters[name]['name'] = mangled_name
# After we have the cluster names fixed up, go finalize filters.
if self.tracing:
self.tracing.finalize()
if self.ratelimit:
self.ratelimit.finalize()
for filter in self.filters:
filter.finalize()
# XXX Brutal hackery here! Probably this is a clue that Config and IR and such should have
# a common container that can hold errors.
def post_error(self, rc: Union[str, RichStatus], resource: Optional[IRResource]=None, rkey: Optional[str]=None, log_level=logging.INFO):
self.aconf.post_error(rc, resource=resource, rkey=rkey, log_level=log_level)
def agent_init(self, aconf: Config) -> None:
"""
Initialize as the Intercept Agent, if we're doing that.
THIS WHOLE METHOD NEEDS TO GO AWAY: instead, just configure the agent with CRDs as usual.
However, that's just too painful to contemplate without `edgectl inject-agent`.
:param aconf: Config to work with
:return: None
"""
# Intercept stuff is an Edge Stack thing.
if not (self.edge_stack_allowed and self.agent_active):
self.logger.debug("Intercept agent not active, skipping initialization")
return
self.agent_service = os.environ.get("AGENT_SERVICE", None)
if self.agent_service is None:
# This is technically impossible, but whatever.
self.logger.info("Intercept agent active but no AGENT_SERVICE? skipping initialization")
self.agent_active = False
return
self.logger.debug(f"Intercept agent active for {self.agent_service}, initializing")
# We're going to either create a Host to terminate TLS, or to do cleartext. In neither
# case will we do ACME. Set additionalPort to -1 so we don't grab 8080 in the TLS case.
host_args = {
"hostname": "*",
"selector": {
"matchLabels": {
"intercept": self.agent_service
}
},
"acmeProvider": {
"authority": "none"
},
"requestPolicy": {
"insecure": {
"additionalPort": -1,
},
},
}
# Have they asked us to do TLS?
agent_termination_secret = os.environ.get("AGENT_TLS_TERM_SECRET", None)
if agent_termination_secret:
# Yup.
host_args["tlsSecret"] = { "name": agent_termination_secret }
else:
# No termination secret, so do cleartext.
host_args["requestPolicy"]["insecure"]["action"] = "Route"
host = IRHost(self, aconf, rkey=self.ambassador_module.rkey, location=self.ambassador_module.location,
name="agent-host",
**host_args)
if host.is_active():
host.referenced_by(self.ambassador_module)
host.sourced_by(self.ambassador_module)
self.logger.debug(f"Intercept agent: saving host {host.pretty()}")
# self.logger.debug(host.as_json())
self.save_host(host)
else:
self.logger.debug(f"Intercept agent: not saving inactive host {host.pretty()}")
# How about originating TLS?
agent_origination_secret = os.environ.get("AGENT_TLS_ORIG_SECRET", None)
if agent_origination_secret:
# Uhhhh. Synthesize a TLSContext for this, I guess.
#
# XXX What if they already have a context with this name?
ctx = IRTLSContext(self, aconf, rkey=self.ambassador_module.rkey, location=self.ambassador_module.location,
name="agent-origination-context",
secret=agent_origination_secret)
ctx.referenced_by(self.ambassador_module)
self.save_tls_context(ctx)
self.logger.debug(f"Intercept agent: saving origination TLSContext {ctx.name}")
# self.logger.debug(ctx.as_json())
self.agent_origination_ctx = ctx
def agent_finalize(self, aconf) -> None:
if not (self.edge_stack_allowed and self.agent_active):
self.logger.debug(f"Intercept agent not active, skipping finalization")
return
# self.logger.info(f"Intercept agent active for {self.agent_service}, finalizing")
# We don't want to listen on the default AES ports (8080, 8443) as that is likely to
# conflict with the user's application running in the same Pod.
agent_listen_port_str = os.environ.get("AGENT_LISTEN_PORT", None)
if agent_listen_port_str is None:
self.ambassador_module.service_port = Constants.SERVICE_PORT_AGENT
else:
try:
self.ambassador_module.service_port = int(agent_listen_port_str)
except ValueError:
self.post_error(f"Intercept agent listen port {agent_listen_port_str} is not valid")
self.agent_active = False
return
agent_port_str = os.environ.get("AGENT_PORT", None)
if agent_port_str is None:
self.post_error("Intercept agent requires both AGENT_SERVICE and AGENT_PORT to be set")
self.agent_active = False
return
agent_port = -1
try:
agent_port = int(agent_port_str)
except ValueError:
self.post_error(f"Intercept agent port {agent_port_str} is not valid")
self.agent_active = False
return
# self.logger.info(f"Intercept agent active for {self.agent_service}:{agent_port}, adding fallback mapping")
# XXX OMG this is a crock. Don't use precedence -1000000 for this, because otherwise Edge
# Stack might decide it's the Edge Policy Console fallback mapping and force it to be
# routed insecure. !*@&#*!@&#* We need per-mapping security settings.
#
# XXX What if they already have a mapping with this name?
ctx_name = None
if self.agent_origination_ctx:
ctx_name = self.agent_origination_ctx.name
mapping = IRHTTPMapping(self, aconf, rkey=self.ambassador_module.rkey, location=self.ambassador_module.location,
name="agent-fallback-mapping",
metadata_labels={"ambassador_diag_class": "private"},
prefix="/",
rewrite="/",
service=f"127.0.0.1:{agent_port}",
tls=ctx_name,
precedence=-999999) # No, really. See comment above.
mapping.referenced_by(self.ambassador_module)
self.add_mapping(aconf, mapping)
def save_resource(self, resource: IRResource) -> IRResource:
if resource.is_active():
self.saved_resources[resource.rkey] = resource
return resource
def save_host(self, host: IRHost) -> None:
extant_host = self.hosts.get(host.name, None)
is_valid = True
if extant_host:
self.post_error("Duplicate Host %s; keeping definition from %s" % (host.name, extant_host.location))
is_valid = False
if is_valid:
self.hosts[host.name] = host
# Get saved hosts.
def get_hosts(self) -> List[IRHost]:
return list(self.hosts.values())
# Save secrets from our aconf.
def save_secret_info(self, aconf):
aconf_secrets = aconf.get_config("secrets") or {}
self.logger.debug(f"IR: aconf has secrets: {aconf_secrets.keys()}")
for secret_key, aconf_secret in aconf_secrets.items():
# Ignore anything that doesn't at least have a public half.
if aconf_secret.get('tls_crt') or aconf_secret.get('cert-chain_pem'):
secret_info = SecretInfo.from_aconf_secret(aconf_secret)
secret_name = secret_info.name
secret_namespace = secret_info.namespace
self.logger.debug(f'saving {secret_name}.{secret_namespace} (from {secret_key}) in secret_info')
self.secret_info[f'{secret_name}.{secret_namespace}'] = secret_info
def save_tls_context(self, ctx: IRTLSContext) -> None:
extant_ctx = self.tls_contexts.get(ctx.name, None)
is_valid = True
if extant_ctx:
self.post_error("Duplicate TLSContext %s; keeping definition from %s" % (ctx.name, extant_ctx.location))
is_valid = False
if ctx.get('redirect_cleartext_from', None) is not None:
if self.redirect_cleartext_from is None:
self.redirect_cleartext_from = ctx.redirect_cleartext_from
else:
if self.redirect_cleartext_from != ctx.redirect_cleartext_from:
self.post_error("TLSContext: %s; configured conflicting redirect_from port: %s" % (ctx.name, ctx.redirect_cleartext_from))
is_valid = False
if is_valid:
self.tls_contexts[ctx.name] = ctx
def get_resolver(self, name: str) -> Optional[IRServiceResolver]:
return self.resolvers.get(name, None)
def add_resolver(self, resolver: IRServiceResolver) -> None:
self.resolvers[resolver.name] = resolver
def has_tls_context(self, name: str) -> bool:
return bool(self.get_tls_context(name))
def get_tls_context(self, name: str) -> Optional[IRTLSContext]:
return self.tls_contexts.get(name, None)
def get_tls_contexts(self) -> ValuesView[IRTLSContext]:
return self.tls_contexts.values()
def resolve_secret(self, resource: IRResource, secret_name: str, namespace: str):
# OK. Do we already have a SavedSecret for this?
ss_key = f'{secret_name}.{namespace}'
ss = self.saved_secrets.get(ss_key, None)
if ss:
# Done. Return it.
self.logger.debug(f"resolve_secret {ss_key}: using cached SavedSecret")
self.secret_handler.still_needed(resource, secret_name, namespace)
return ss
# OK, do we have a secret_info for it??
# self.logger.debug(f"resolve_secret {ss_key}: checking secret_info")
secret_info = self.secret_info.get(ss_key, None)
if secret_info:
self.logger.debug(f"resolve_secret {ss_key}: found secret_info")
self.secret_handler.still_needed(resource, secret_name, namespace)
else:
# No secret_info, so ask the secret_handler to find us one.
self.logger.debug(f"resolve_secret {ss_key}: no secret_info, asking handler to load")
secret_info = self.secret_handler.load_secret(resource, secret_name, namespace)
if not secret_info:
self.logger.error(f"Secret {ss_key} unknown")
ss = SavedSecret(secret_name, namespace, None, None, None, None, None)
else:
self.logger.debug(f"resolve_secret {ss_key}: found secret, asking handler to cache")
# OK, we got a secret_info. Cache that using the secret handler.
ss = self.secret_handler.cache_secret(resource, secret_info)
# Save this for next time.
self.saved_secrets[ss_key] = ss  # keyed the same way it is looked up above
return ss
def resolve_targets(self, cluster: IRCluster, resolver_name: Optional[str],
hostname: str, namespace: str, port: int) -> Optional[SvcEndpointSet]:
# Is the host already an IP address?
is_ip_address = False
try:
x = ip_address(hostname)
is_ip_address = True
except ValueError:
pass
if is_ip_address:
# Already an IP address, great.
self.logger.debug(f'cluster {cluster.name}: {hostname} is already an IP address')
return [
{
'ip': hostname,
'port': port,
'target_kind': 'IPaddr'
}
]
# Which resolver should we use?
if not resolver_name:
resolver_name = self.ambassador_module.get('resolver', 'kubernetes-service')
# Casting to str is OK because the Ambassador module's resolver must be a string,
# so all the paths for resolver_name land with it being a string.
resolver = self.get_resolver(typecast(str, resolver_name))
# It should not be possible for resolver to be unset here.
if not resolver:
self.post_error(f"cluster {cluster.name} has invalid resolver {resolver_name}?", rkey=cluster.rkey)
return None
# OK, ask the resolver for the target list. Understanding the mechanics of resolution
# and the load balancer policy and all that is up to the resolver.
return resolver.resolve(self, cluster, hostname, namespace, port)
def save_filter(self, resource: IRFilter, already_saved=False) -> None:
if resource.is_active():
if not already_saved:
resource = typecast(IRFilter, self.save_resource(resource))
self.filters.append(resource)
def walk_saved_resources(self, aconf, method_name):
for res in self.saved_resources.values():
getattr(res, method_name)(self, aconf)
def add_listener(self, listener: IRListener) -> None:
self.listeners.append(listener)
def add_mapping(self, aconf: Config, mapping: IRBaseMapping) -> Optional[IRBaseMappingGroup]:
mapping.check_status()
if mapping.is_active():
if mapping.group_id not in self.groups:
group_name = "GROUP: %s" % mapping.name
group_class = mapping.group_class()
group = group_class(ir=self, aconf=aconf,
location=mapping.location,
name=group_name,
mapping=mapping)
self.groups[group.group_id] = group
else:
group = self.groups[mapping.group_id]
group.add_mapping(aconf, mapping)
return group
else:
return None
def ordered_groups(self) -> Iterable[IRBaseMappingGroup]:
return reversed(sorted(self.groups.values(), key=lambda x: x['group_weight']))
def has_cluster(self, name: str) -> bool:
return name in self.clusters
def get_cluster(self, name: str) -> Optional[IRCluster]:
return self.clusters.get(name, None)
def add_cluster(self, cluster: IRCluster) -> IRCluster:
if not self.has_cluster(cluster.name):
self.clusters[cluster.name] = cluster
if cluster.is_edge_stack_sidecar():
# self.logger.debug(f"IR: cluster {cluster.name} is the sidecar")
self.sidecar_cluster_name = cluster.name
return self.clusters[cluster.name]
def merge_cluster(self, cluster: IRCluster) -> bool:
extant = self.get_cluster(cluster.name)
if extant:
return extant.merge(cluster)
else:
self.add_cluster(cluster)
return True
def has_grpc_service(self, name: str) -> bool:
return name in self.grpc_services
def add_grpc_service(self, name: str, cluster: IRCluster) -> IRCluster:
if not self.has_grpc_service(name):
if not self.has_cluster(cluster.name):
self.clusters[cluster.name] = cluster
self.grpc_services[name] = cluster
return self.grpc_services[name]
def as_dict(self) -> Dict[str, Any]:
od = {
'identity': {
'ambassador_id': self.ambassador_id,
'ambassador_namespace': self.ambassador_namespace,
'ambassador_nodename': self.ambassador_nodename,
},
'ambassador': self.ambassador_module.as_dict(),
'clusters': { cluster_name: cluster.as_dict()
for cluster_name, cluster in self.clusters.items() },
'grpc_services': { svc_name: cluster.as_dict()
for svc_name, cluster in self.grpc_services.items() },
'hosts': [ host.as_dict() for host in self.hosts.values() ],
'listeners': [ listener.as_dict() for listener in self.listeners ],
'filters': [ filt.as_dict() for filt in self.filters ],
'groups': [ group.as_dict() for group in self.ordered_groups() ],
'tls_contexts': [ context.as_dict() for context in self.tls_contexts.values() ],
'services': self.services,
'k8s_status_updates': self.k8s_status_updates
}
if self.log_services:
od['log_services'] = [ srv.as_dict() for srv in self.log_services.values() ]
if self.tracing:
od['tracing'] = self.tracing.as_dict()
if self.ratelimit:
od['ratelimit'] = self.ratelimit.as_dict()
return od
def as_json(self) -> str:
return json.dumps(self.as_dict(), sort_keys=True, indent=4)
def features(self) -> Dict[str, Any]:
od: Dict[str, Union[bool, int, Optional[str]]] = {}
if self.aconf.helm_chart:
od['helm_chart'] = self.aconf.helm_chart
od['managed_by'] = self.aconf.pod_labels.get('app.kubernetes.io/managed-by', '')
tls_termination_count = 0 # TLS termination contexts
tls_origination_count = 0 # TLS origination contexts
using_tls_module = False
using_tls_contexts = False
for ctx in self.get_tls_contexts():
if ctx:
secret_info = ctx.get('secret_info', {})
if secret_info:
using_tls_contexts = True
if secret_info.get('certificate_chain_file', None):
tls_termination_count += 1
if secret_info.get('cacert_chain_file', None):
tls_origination_count += 1
if ctx.get('_legacy', False):
using_tls_module = True
od['tls_using_module'] = using_tls_module
od['tls_using_contexts'] = using_tls_contexts
od['tls_termination_count'] = tls_termination_count
od['tls_origination_count'] = tls_origination_count
for key in [ 'diagnostics', 'liveness_probe', 'readiness_probe', 'statsd' ]:
od[key] = self.ambassador_module.get(key, {}).get('enabled', False)
for key in [ 'use_proxy_proto', 'use_remote_address', 'x_forwarded_proto_redirect', 'enable_http10',
'add_linkerd_headers', 'use_ambassador_namespace_for_service_resolution', 'proper_case' ]:
od[key] = self.ambassador_module.get(key, False)
od['service_resource_total'] = len(list(self.services.keys()))
od['xff_num_trusted_hops'] = self.ambassador_module.get('xff_num_trusted_hops', 0)
od['listener_idle_timeout_ms'] = self.ambassador_module.get('listener_idle_timeout_ms', None)
od['server_name'] = bool(self.ambassador_module.server_name != 'envoy')
od['custom_ambassador_id'] = bool(self.ambassador_id != 'default')
default_port = Constants.SERVICE_PORT_HTTPS if tls_termination_count else Constants.SERVICE_PORT_HTTP
od['custom_listener_port'] = bool(self.ambassador_module.service_port != default_port)
od['custom_diag_port'] = bool(self.ambassador_module.diag_port != Constants.DIAG_PORT)
cluster_count = 0
cluster_grpc_count = 0 # clusters using GRPC upstream
cluster_http_count = 0 # clusters using HTTP or HTTPS upstream
cluster_tls_count = 0 # clusters using TLS origination
cluster_routing_kube_count = 0 # clusters routing using kube
cluster_routing_envoy_rr_count = 0 # clusters routing using envoy round robin
cluster_routing_envoy_rh_count = 0 # clusters routing using envoy ring hash
cluster_routing_envoy_maglev_count = 0 # clusters routing using envoy maglev
cluster_routing_envoy_lr_count = 0 # clusters routing using envoy least request
endpoint_grpc_count = 0 # endpoints using GRPC upstream
endpoint_http_count = 0 # endpoints using HTTP/HTTPS upstream
endpoint_tls_count = 0 # endpoints using TLS origination
endpoint_routing_kube_count = 0 # endpoints Kube is routing to
endpoint_routing_envoy_rr_count = 0 # endpoints Envoy round robin is routing to
endpoint_routing_envoy_rh_count = 0 # endpoints Envoy ring hash is routing to
endpoint_routing_envoy_maglev_count = 0 # endpoints Envoy maglev is routing to
endpoint_routing_envoy_lr_count = 0 # endpoints Envoy least request is routing to
for cluster in self.clusters.values():
cluster_count += 1
using_tls = False
using_http = False
using_grpc = False
lb_type = 'kube'
if cluster.get('enable_endpoints', False):
lb_type = cluster.get('lb_type', 'round_robin')
if lb_type == 'kube':
cluster_routing_kube_count += 1
elif lb_type == 'ring_hash':
cluster_routing_envoy_rh_count += 1
elif lb_type == 'maglev':
cluster_routing_envoy_maglev_count += 1
elif lb_type == 'least_request':
cluster_routing_envoy_lr_count += 1
else:
cluster_routing_envoy_rr_count += 1
if cluster.get('tls_context', None):
using_tls = True
cluster_tls_count += 1
if cluster.get('grpc', False):
using_grpc = True
cluster_grpc_count += 1
else:
using_http = True
cluster_http_count += 1
cluster_endpoints = cluster.urls if (lb_type == 'kube') else cluster.get('targets', [])
# Paranoia, really.
if not cluster_endpoints:
cluster_endpoints = []
num_endpoints = len(cluster_endpoints)
# self.logger.debug(f'cluster {cluster.name}: lb_type {lb_type}, endpoints {cluster_endpoints} ({num_endpoints})')
if using_tls:
endpoint_tls_count += num_endpoints
if using_http:
endpoint_http_count += num_endpoints
if using_grpc:
endpoint_grpc_count += num_endpoints
if lb_type == 'kube':
endpoint_routing_kube_count += num_endpoints
elif lb_type == 'ring_hash':
endpoint_routing_envoy_rh_count += num_endpoints
elif lb_type == 'maglev':
endpoint_routing_envoy_maglev_count += num_endpoints
elif lb_type == 'least_request':
endpoint_routing_envoy_lr_count += num_endpoints
else:
endpoint_routing_envoy_rr_count += num_endpoints
od['cluster_count'] = cluster_count
od['cluster_grpc_count'] = cluster_grpc_count
od['cluster_http_count'] = cluster_http_count
od['cluster_tls_count'] = cluster_tls_count
od['cluster_routing_kube_count'] = cluster_routing_kube_count
od['cluster_routing_envoy_rr_count'] = cluster_routing_envoy_rr_count
od['cluster_routing_envoy_rh_count'] = cluster_routing_envoy_rh_count
od['cluster_routing_envoy_maglev_count'] = cluster_routing_envoy_maglev_count
od['cluster_routing_envoy_lr_count'] = cluster_routing_envoy_lr_count
od['endpoint_routing'] = Config.enable_endpoints
od['endpoint_grpc_count'] = endpoint_grpc_count
od['endpoint_http_count'] = endpoint_http_count
od['endpoint_tls_count'] = endpoint_tls_count
od['endpoint_routing_kube_count'] = endpoint_routing_kube_count
od['endpoint_routing_envoy_rr_count'] = endpoint_routing_envoy_rr_count
od['endpoint_routing_envoy_rh_count'] = endpoint_routing_envoy_rh_count
od['endpoint_routing_envoy_maglev_count'] = endpoint_routing_envoy_maglev_count
od['endpoint_routing_envoy_lr_count'] = endpoint_routing_envoy_lr_count
od['cluster_ingress_count'] = self.aconf.get_count('knative_cluster_ingress')
od['knative_ingress_count'] = self.aconf.get_count('knative_ingress')
od['k8s_ingress_count'] = len(self.aconf.k8s_ingresses)
od['k8s_ingress_class_count'] = len(self.aconf.k8s_ingress_classes)
extauth = False
extauth_proto: Optional[str] = None
extauth_allow_body = False
extauth_host_count = 0
ratelimit = False
ratelimit_data_plane_proto = False
ratelimit_custom_domain = False
tracing = False
tracing_driver: Optional[str] = None
for filter in self.filters:
if filter.kind == 'IRAuth':
extauth = True
extauth_proto = filter.get('proto', 'http')
extauth_allow_body = filter.get('allow_request_body', False)
extauth_host_count = len(filter.hosts.keys())
if self.ratelimit:
ratelimit = True
ratelimit_data_plane_proto = self.ratelimit.get('data_plane_proto', False)
ratelimit_custom_domain = bool(self.ratelimit.domain != 'ambassador')
if self.tracing:
tracing = True
tracing_driver = self.tracing.driver
od['extauth'] = extauth
od['extauth_proto'] = extauth_proto
od['extauth_allow_body'] = extauth_allow_body
od['extauth_host_count'] = extauth_host_count
od['ratelimit'] = ratelimit
od['ratelimit_data_plane_proto'] = ratelimit_data_plane_proto
od['ratelimit_custom_domain'] = ratelimit_custom_domain
od['tracing'] = tracing
od['tracing_driver'] = tracing_driver
group_count = 0
group_http_count = 0 # HTTPMappingGroups
group_tcp_count = 0 # TCPMappingGroups
group_precedence_count = 0 # groups using explicit precedence
group_header_match_count = 0 # groups using header matches
group_regex_header_count = 0 # groups using regex header matches
group_regex_prefix_count = 0 # groups using regex prefix matches
group_shadow_count = 0 # groups using shadows
group_shadow_weighted_count = 0 # groups using shadows with non-100% weights
group_host_redirect_count = 0 # groups using host_redirect
group_host_rewrite_count = 0 # groups using host_rewrite
group_canary_count = 0 # groups coalescing multiple mappings
group_resolver_kube_service = 0 # groups using the KubernetesServiceResolver
group_resolver_kube_endpoint = 0 # groups using the KubernetesEndpointResolver
group_resolver_consul = 0 # groups using the ConsulResolver
mapping_count = 0 # total mappings
for group in self.ordered_groups():
group_count += 1
if group.get('kind', "IRHTTPMappingGroup") == 'IRTCPMappingGroup':
group_tcp_count += 1
else:
group_http_count += 1
if group.get('precedence', 0) != 0:
group_precedence_count += 1
using_headers = False
using_regex_headers = False
for header in group.get('headers', []):
using_headers = True
if header['regex']:
using_regex_headers = True
break
if using_headers:
group_header_match_count += 1
if using_regex_headers:
group_regex_header_count += 1
if len(group.mappings) > 1:
group_canary_count += 1
mapping_count += len(group.mappings)
if group.get('shadows', []):
group_shadow_count += 1
if group.get('weight', 100) != 100:
group_shadow_weighted_count += 1
if group.get('host_redirect', {}):
group_host_redirect_count += 1
if group.get('host_rewrite', None):
group_host_rewrite_count += 1
res_name = group.get('resolver', self.ambassador_module.get('resolver', 'kubernetes-service'))
resolver = self.get_resolver(res_name)
if resolver:
if resolver.kind == 'KubernetesServiceResolver':
group_resolver_kube_service += 1
elif resolver.kind == 'KubernetesEndpointResolver':
group_resolver_kube_endpoint += 1
elif resolver.kind == 'ConsulResolver':
group_resolver_consul += 1
od['group_count'] = group_count
od['group_http_count'] = group_http_count
od['group_tcp_count'] = group_tcp_count
od['group_precedence_count'] = group_precedence_count
od['group_header_match_count'] = group_header_match_count
od['group_regex_header_count'] = group_regex_header_count
od['group_regex_prefix_count'] = group_regex_prefix_count
od['group_shadow_count'] = group_shadow_count
od['group_shadow_weighted_count'] = group_shadow_weighted_count
od['group_host_redirect_count'] = group_host_redirect_count
od['group_host_rewrite_count'] = group_host_rewrite_count
od['group_canary_count'] = group_canary_count
od['group_resolver_kube_service'] = group_resolver_kube_service
od['group_resolver_kube_endpoint'] = group_resolver_kube_endpoint
od['group_resolver_consul'] = group_resolver_consul
od['mapping_count'] = mapping_count
od['listener_count'] = len(self.listeners)
od['host_count'] = len(self.hosts)
return od
|
py | b410737f91987df8271bf47fbe9d50f4216a3725 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 [email protected]
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from __future__ import absolute_import, division, print_function, unicode_literals
import efm8boot.records
import efm8boot.ids
from efm8boot.bootloader import EFM8Bootloader
from efm8boot.hid_bootloader import EFM8BootloaderHID, find_devices
if __name__ == '__main__':
from efm8boot.records import *
|
py | b410747dd892caf87c2b3ce9c8bf0a48aa058e9f | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset, mapping=None):
if mapping is None:
mapping = encoders
encoder = mapping.get(type(val))
# Fallback to default when no encoder found
if not encoder:
try:
encoder = mapping[text_type]
except KeyError:
raise TypeError("no default type converter defined")
if encoder in (escape_dict, escape_sequence):
val = encoder(val, charset, mapping)
else:
val = encoder(val, mapping)
return val
def escape_dict(val, charset, mapping=None):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset, mapping)
n[k] = quoted
return n
def escape_sequence(val, charset, mapping=None):
n = []
for item in val:
quoted = escape_item(item, charset, mapping)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset, mapping=None):
val = map(lambda x: escape_item(x, charset, mapping), val)
return ','.join(val)
def escape_bool(value, mapping=None):
return str(int(value))
def escape_object(value, mapping=None):
return str(value)
def escape_int(value, mapping=None):
return str(value)
def escape_float(value, mapping=None):
return ('%.15g' % value)
def escape_string(value, mapping=None):
return ("%s" % (ESCAPE_REGEX.sub(
lambda match: ESCAPE_MAP.get(match.group(0)), value),))
def escape_str(value, mapping=None):
return "'%s'" % escape_string(value, mapping)
def escape_unicode(value, mapping=None):
return escape_str(value, mapping)
def escape_bytes(value, mapping=None):
# escape_bytes is called only on Python 3.
return escape_str(value.decode('ascii', 'surrogateescape'), mapping)
def escape_None(value, mapping=None):
return 'NULL'
def escape_timedelta(obj, mapping=None):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
if obj.microseconds:
fmt = "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'"
else:
fmt = "'{0:02d}:{1:02d}:{2:02d}'"
return fmt.format(hours, minutes, seconds, obj.microseconds)
def escape_time(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_datetime(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:" \
"{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:" \
"{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_date(obj, mapping=None):
fmt = "'{0.year:04}-{0.month:02}-{0.day:02}'"
return fmt.format(obj)
def escape_struct_time(obj, mapping=None):
return escape_datetime(datetime.datetime(*obj[:6]))
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(obj)
try:
ymd, hms = obj.split(sep, 1)
usecs = '0'
if '.' in hms:
hms, usecs = hms.split('.')
usecs = float('0.' + usecs) * 1e6
return datetime.datetime(*[ int(x) for x in ymd.split('-')+
hms.split(':')+[usecs] ])
except ValueError:
return convert_date(obj)
def convert_timedelta(obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 82423)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
negate = 1
if hours.startswith("-"):
hours = hours[1:]
negate = -1
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return None
def convert_time(obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
set this function as the converter for FIELD_TYPE.TIME.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return None
def convert_date(obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
return set(s.split(","))
def through(x):
return x
convert_bit = through
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
encoding = charset_to_encoding(field_charset)
if field.flags & FLAG.SET:
return convert_set(data.decode(encoding))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(encoding)
elif connection.charset != field_charset:
data = data.decode(encoding)
data = data.encode(connection.encoding)
return data
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
dict: escape_dict,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: escape_object,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = decoders
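# Illustrative usage sketch showing the string forms these encoders produce
# when called directly; the values here are chosen purely as examples.
def _example_escapes():
    assert escape_item("it's", "utf8") == "'it\\'s'"                  # quote escaped
    assert escape_item(datetime.date(2007, 2, 26), "utf8") == "'2007-02-26'"
    assert escape_item([1, "a"], "utf8") == "(1,'a')"                 # sequence becomes a tuple literal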
def Thing2Literal(obj):
return escape_str(str(obj)) |
py | b410759c771e9894e2771879d264b235daeedba7 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cloudfiles.errors import ContainerNotEmpty
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class DeleteContainer(forms.SelfHandlingForm):
container_name = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
api.swift_delete_container(request, data['container_name'])
except ContainerNotEmpty, e:
messages.error(request,
_('Unable to delete non-empty container: %s') %
data['container_name'])
LOG.exception('Unable to delete container "%s". Exception: "%s"' %
(data['container_name'], str(e)))
else:
messages.info(request,
_('Successfully deleted container: %s') % \
data['container_name'])
return shortcuts.redirect(request.build_absolute_uri())
class CreateContainer(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label=_("Container Name"))
def handle(self, request, data):
api.swift_create_container(request, data['name'])
messages.success(request, _("Container was successfully created."))
return shortcuts.redirect("horizon:nova:containers:index")
class FilterObjects(forms.SelfHandlingForm):
container_name = forms.CharField(widget=forms.HiddenInput())
object_prefix = forms.CharField(required=False)
def handle(self, request, data):
object_prefix = data['object_prefix'] or None
objects, more = api.swift_get_objects(request,
data['container_name'],
prefix=object_prefix)
if not objects:
messages.info(request,
_('There are no objects matching that prefix in %s') %
data['container_name'])
return (objects, more)
class DeleteObject(forms.SelfHandlingForm):
object_name = forms.CharField(widget=forms.HiddenInput())
container_name = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
api.swift_delete_object(
request,
data['container_name'],
data['object_name'])
messages.info(request,
_('Successfully deleted object: %s') %
data['object_name'])
return shortcuts.redirect(request.build_absolute_uri())
class UploadObject(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label=_("Object Name"))
object_file = forms.FileField(label=_("File"))
container_name = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
api.swift_upload_object(
request,
data['container_name'],
data['name'],
self.files['object_file'].read())
messages.success(request, _("Object was successfully uploaded."))
return shortcuts.redirect("horizon:nova:containers:object_index",
data['container_name'])
class CopyObject(forms.SelfHandlingForm):
new_container_name = forms.ChoiceField(
label=_("Container to store object in"))
new_object_name = forms.CharField(max_length="255",
label=_("New object name"))
orig_container_name = forms.CharField(widget=forms.HiddenInput())
orig_object_name = forms.CharField(widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
containers = kwargs.pop('containers')
super(CopyObject, self).__init__(*args, **kwargs)
self.fields['new_container_name'].choices = containers
def handle(self, request, data):
orig_container_name = data['orig_container_name']
orig_object_name = data['orig_object_name']
new_container_name = data['new_container_name']
new_object_name = data['new_object_name']
api.swift_copy_object(request, orig_container_name,
orig_object_name, new_container_name,
new_object_name)
messages.success(request,
_('Object was successfully copied to %(container)s\%(obj)s') %
{"container": new_container_name, "obj": new_object_name})
return shortcuts.redirect("horizon:nova:containers:object_index",
data['new_container_name'])
|
py | b41075e6a892618d19b3c99adc238544aeda7ff1 | import socket
import json
from threading import Thread
from Helper.JsonHandler import JsonHandler
import pymongo
class Participant:
Status = True
payload = None
_currentTransationId = ""
_currentState = ""
def __init__(self):
self._JsonHandler = JsonHandler()
config = self._JsonHandler.LoadJson('Config.json')
# if config['IsPrimary'] :
# self.Status = False
# return
print(config)
try:
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((config['CordinatorHost'], config['CordinatorSocketPort']))
msg = {"type" : "connectRequest", "source" : "participant"}
self.client.send(bytes(json.dumps(msg), "utf8"))
#open mongo client
self.mongoClient = pymongo.MongoClient("mongodb://localhost:27017/")
dataBase = self.mongoClient['DBB_db']
self.PostCollection = dataBase['PostCollection']
except Exception as ex:
print(ex)
receive_thread = Thread(target=self.receive)
receive_thread.daemon = True
receive_thread.start()
# self.Run()
def __del__(self):
if self.Status:
self.client.close()
# def Run(self):
# while Status:
# continue
def receive(self):
while True:
try:
msg = self.client.recv(1024).decode("utf8")
if msg:
msgJson = json.loads(msg)
print(msgJson)
if msgJson['type'] == "ReadyToCommit":
                        #if it has become primary it will reply no and close the socket
_currentTransationId = msgJson['transactionId']
_currentState = "Prepare"
payload = msgJson['payload']
msg = {"transactionId" : _currentTransationId, "type" : "ReadyToCommit", "response": True}
self.client.send(bytes(json.dumps(msg), "utf8"))
elif msgJson['type'] == "Commit":
if msgJson['transactionId'] == _currentTransationId:
try:
_currentState = "Commit"
#save in the db
print("comitted" + str(payload))
querry = {'PayLoadToken' : payload['PayLoadToken']}
result = self.PostCollection.find(querry)
print("Result"+ str(result.count()))
if result.count() != 0:
msg = {"type" : "connectClose", "source" : "participant"}
self.client.send(bytes(json.dumps(msg), "utf8"))
self.client.close()
self.mongoClient.close()
self.Status = False
break
else:
self.PostCollection.insert(payload)
msg = {"transactionId" : _currentTransationId,"type" : "Commit", "response": True}
self.client.send(bytes(json.dumps(msg), "utf8"))
except Exception as ex:
print(ex)
elif msgJson['type'] == "Abort":
_currentState = "Abort"
msg = {"transactionId" : _currentTransationId,"type" : "Abort", "response": True}
self.client.send(bytes(json.dumps(msg), "utf8"))
except Exception as ex:
continue
|
py | b410762f37fd85966ee8e9a487992aff46981073 | # -*- coding: utf-8 -*-
"""
test_ipmt
----------------------------------
Tests for `ipmt` project.
"""
import os
from ipmt.migration import Version, Repository, Action, Plan, Branch
from ipmt.error import OperationError
import pytest
from mock import Mock
def test_repository_create(repository):
assert repository.is_empty
repository.create("test", None)
assert not repository.is_empty
assert os.path.exists(os.path.join(repository.path, "000001#test.py"))
repository.create("test2", None)
assert repository.root.find_version("000002") is not None
repository2 = Repository.load(repository.path)
assert repository2.root.find_version("000002") is not None
assert (
repository2.root.find_version("000002").prev.full_version == "000001"
)
def test_repository_meta(repository):
assert repository.meta is None
repository.meta = {"ok": True}
meta = repository.meta
assert meta["ok"]
assert os.path.exists(os.path.join(repository.path, "meta.yml"))
def test_version():
ver = Version(
["000001", "B1", "1"], "000001.B1.1_test.py", "test", None, None
)
assert ver.vpath == ["000001", "B1", "1"]
assert ver.name == "test"
assert ver.filename == "000001.B1.1_test.py"
assert ver.full_version == "000001.B1.1"
def test_action():
branch = Branch(["000001", "B1"], None, None)
prev_ver = Version(
["000001", "B1", "1"], "000001.B1.1_test.py", "test", None, branch
)
branch.append(prev_ver)
ver = Version(
["000001", "B1", "2"], "000001.B1.2_test.py", "test", None, branch
)
branch.append(ver)
ver.is_up_transactional = True
ver.is_down_transactional = True
ver.isolation_level_up = "read committed"
ver.isolation_level_down = "read committed"
ver.up = Mock()
ver.down = Mock()
up_db = Mock()
down_db = Mock()
action = Action(True, ver)
action.execute(up_db)
ver.up.assert_called_with(up_db)
up_db.ops_add.assert_called_with("up", "000001.B1.1", "000001.B1.2")
action = Action(False, ver)
action.execute(down_db)
ver.down.assert_called_with(down_db)
down_db.ops_add.assert_called_with("down", "000001.B1.2", "000001.B1.1")
def test_get_plan(repository):
assert repository.is_empty
repository.create("test", None)
repository.create("test", None)
v = repository.root.find_version("000002")
plan = Plan.get_plan(v, repository)
assert len(plan) == 2
assert plan[0].is_up
assert plan[0].version.full_version == "000001"
assert plan[1].is_up
assert plan[1].version.full_version == "000002"
def test_get_switch_plan(repository):
repository.create("test_0", None)
repository.create("test_1", "000001.B1")
repository.create("test_2", "000001.B1")
repository.create("test_0", None)
cur = repository.root.find_version("000001.B1.2")
tgt = repository.root.find_version("000002")
plan = Plan.get_switch_plan(cur, tgt, repository)
assert not plan[0].is_up
assert plan[0].version.full_version == "000001.B1.2"
assert not plan[1].is_up
assert plan[1].version.full_version == "000001.B1.1"
assert plan[2].is_up
assert plan[2].version.full_version == "000002"
def test_rebase(repository):
repository.create("test_0", None)
repository.create("test_1", "000001.B1")
repository.create("test_2", "000001.B1")
repository.create("test_0", None)
repository.rebase("000001.B1")
assert os.path.exists(os.path.join(repository.path, "000003#test_1.py"))
assert os.path.exists(os.path.join(repository.path, "000004#test_2.py"))
repository.current = Mock(return_value="000001")
repository._check_consistency(None)
repository.current = Mock(return_value="000001.B1.2")
with pytest.raises(OperationError):
repository._check_consistency(None)
def test_show(repository):
repository.create("test_0", None)
repository.create("test_1", "000001.B1")
res = repository.root.show()
assert "000001.B1.1" in res
def test_validate_version():
Version.validate_version("000001.B1.1")
with pytest.raises(OperationError):
Version.validate_version("000001.B1.AA")
|
py | b410765a873ff90717ffa919e42e44553a20eede | #!/usr/bin/python
# encoding: utf-8
import sys
#origin_path = sys.path
originPath=sys.path
sys.path.append("/home/ahmed/crnn")
import dataset
#sys.path = origin_path
sys.path = originPath
import lmdb
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.iteritems():
txn.put(k, v)
def convert(originPath, outputPath):
args = [0] * 6
originDataset = dataset.lmdbDataset(originPath, 'abc', args)
print('Origin dataset has %d samples' % len(originDataset))
labelStrList = []
for i in range(len(originDataset)):
label = originDataset.getLabel(i + 1)
labelStrList.append(label)
if i % 10000 == 0:
print(i)
lengthList = [len(s) for s in labelStrList]
items = zip(lengthList, range(len(labelStrList)))
items.sort(key=lambda item: item[0])
env = lmdb.open(outputPath, map_size=1099511627776)
cnt = 1
cache = {}
nSamples = len(items)
for i in range(nSamples):
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
origin_i = items[i][1]
img, label = originDataset[origin_i + 1]
cache[labelKey] = label
cache[imageKey] = img
if cnt % 1000 == 0 or cnt == nSamples:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Convert dataset with %d samples' % nSamples)
if __name__ == "__main__":
convert('/home/ahmed/Downloads/sample/output', '/home/ahmed/Downloads/sample/o_train')
|
py | b410777141b7715a9fc967960c5e4f21afd63707 | # Generated by Django 2.0.7 on 2018-07-11 18:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='storeitem',
name='event',
field=models.ForeignKey(blank=True, help_text='Tapahtuma johon tuote liittyy.', null=True, on_delete=django.db.models.deletion.PROTECT, to='kompomaatti.Event', verbose_name='Tapahtuma'),
),
migrations.AlterField(
model_name='transactionitem',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='store.StoreItem', verbose_name='Tuote'),
),
migrations.AlterField(
model_name='transactionitem',
name='variant',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='store.StoreItemVariant', verbose_name='Tuotevariantti'),
),
]
|
py | b410783d906f4f0256ef84d2e9e25ddea0836b97 | # ---------------------------------------------------------------------
# Zyxel.ZyNOS.add_vlan
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.iaddvlan import IAddVlan
class Script(BaseScript):
name = "Zyxel.ZyNOS.add_vlan"
interface = IAddVlan
def execute(self, vlan_id, name, tagged_ports):
with self.configure():
self.cli("vlan %d" % vlan_id)
self.cli("name %s" % name)
if tagged_ports:
for port in tagged_ports:
self.cli("fixed %s" % port)
self.cli("exit")
self.save_config()
return True
|
py | b410789b00c2ba035d534b35e27bda87d03cb650 | """
Standard DeepLabv3+ Model
This model is based on:
https://keras.io/examples/vision/deeplabv3_plus/
"""
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
def convolution_block(
block_input,
num_filters=256,
kernel_size=3,
dilation_rate=1,
padding="same",
use_bias=False,
):
x = layers.Conv2D(
num_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
        padding=padding,
use_bias=use_bias,
kernel_initializer=keras.initializers.HeNormal(),
)(block_input)
x = layers.BatchNormalization()(x)
return tf.nn.relu(x)
def DilatedSpatialPyramidPooling(dspp_input):
dims = dspp_input.shape
x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)
x = convolution_block(x, kernel_size=1, use_bias=True)
out_pool = layers.UpSampling2D(
size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]), interpolation="bilinear",
)(x)
out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1)
out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6)
out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12)
out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18)
x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18])
output = convolution_block(x, kernel_size=1)
return output
def DeeplabV3Plus(image_size, num_classes):
model_input = keras.Input(shape=(image_size, image_size, 3))
resnet50 = keras.applications.ResNet50(
weights="imagenet", include_top=False, input_tensor=model_input
)
x = resnet50.get_layer("conv4_block6_2_relu").output
x = DilatedSpatialPyramidPooling(x)
input_a = layers.UpSampling2D(
size=(image_size // 4 // x.shape[1], image_size // 4 // x.shape[2]),
interpolation="bilinear",
)(x)
input_b = resnet50.get_layer("conv2_block3_2_relu").output
input_b = convolution_block(input_b, num_filters=48, kernel_size=1)
x = layers.Concatenate(axis=-1)([input_a, input_b])
x = convolution_block(x)
x = convolution_block(x)
x = layers.UpSampling2D(
size=(image_size // x.shape[1], image_size // x.shape[2]),
interpolation="bilinear",
)(x)
model_output = layers.Conv2D(num_classes, kernel_size=(1, 1), padding="same")(x)
model = keras.Model(inputs=model_input, outputs=model_output)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss='binary_crossentropy',
metrics=["accuracy"],
)
return model |
py | b41078b61a4d71dc288791ea07743df8f48cccbf | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image27.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('B2', self.image_dir + 'mylogo.png')
workbook.close()
self.assertExcelEqual()
|
py | b41078cce4684abcbcab1cc60f2f6eecb4a92e0c | import singer
import urllib.request
from datetime import datetime,timezone
now = datetime.now(timezone.utc).isoformat()
schema = {
'properties': {
'ip': {'type': 'string'},
'timestamp': {'type': 'string', 'format': 'date-time'},
},
}
with urllib.request.urlopen('http://icanhazip.com') as response:
ip = response.read().decode('utf-8').strip()
singer.write_schema('my_ip', schema, 'timestamp')
singer.write_records('my_ip', [{'timestamp': now, 'ip': ip}])
|
py | b4107926faf6483cd8e2f7d971fe8bf16faf48d9 | from SciDataTool import Data1D
from SciDataTool.Functions import axes_dict, rev_axes_dict
from SciDataTool.Functions.conversions import get_unit_derivate, get_unit_integrate
def get_data_along(self, *args, unit="SI", is_norm=False, axis_data=[]):
"""Returns the sliced or interpolated version of the data, using conversions and symmetries if needed.
Parameters
----------
self: Data
a Data object
*args: list of strings
List of axes requested by the user, their units and values (optional)
unit: str
Unit requested by the user ("SI" by default)
is_norm: bool
Boolean indicating if the field must be normalized (False by default)
axis_data: list
list of ndarray corresponding to user-input data
Returns
-------
a DataND object
"""
if "dB" in unit:
results = self.get_magnitude_along(
*args, is_squeeze=False, unit=unit, is_norm=is_norm, axis_data=axis_data
)
else:
results = self.get_along(
*args, is_squeeze=False, unit=unit, is_norm=is_norm, axis_data=axis_data
)
values = results.pop(self.symbol)
del results["axes_dict_other"]
axes_list = results.pop("axes_list")
Axes = []
axes_name_new = list(results.keys())
if "time" in axes_name_new:
Data_type = "DataTime"
elif "freqs" in axes_name_new:
Data_type = "DataFreq"
else:
Data_type = "DataND"
# Dynamic import to avoid loop
module = __import__("SciDataTool.Classes." + Data_type, fromlist=[Data_type])
DataClass = getattr(module, Data_type)
for axis_name in axes_name_new:
if not isinstance(results[axis_name], str):
for i, axis in enumerate(self.axes):
if axis.name == axis_name:
index = i
name = axis.name
is_components = axis.is_components
axis_values = results[axis_name]
ax_unit = axis.unit
elif axis_name in axes_dict:
if axes_dict[axis_name][0] == axis.name:
index = i
name = axis_name
is_components = axis.is_components
axis_values = results[axis_name]
ax_unit = axes_dict[axis_name][2]
elif axis_name in rev_axes_dict:
if rev_axes_dict[axis_name][0] == axis.name:
index = i
name = axis_name
is_components = axis.is_components
axis_values = results[axis_name]
ax_unit = rev_axes_dict[axis_name][2]
# Update symmetries
if "smallestperiod" in args[index] or args[index] in [
"freqs",
"wavenumber",
]:
symmetries = self.axes[index].symmetries.copy()
else:
symmetries = dict()
Axes.append(
Data1D(
name=name,
unit=ax_unit,
values=axis_values,
is_components=is_components,
normalizations=self.axes[index].normalizations.copy(),
symmetries=symmetries,
).to_linspace()
)
# Update unit if dB/dBA conversion
if "dB" in unit:
unit = unit
else:
# Update unit if derivation or integration
unit = self.unit
for axis in axes_list:
if axis.extension in ["antiderivate", "integrate", "integrate_local"]:
unit = get_unit_integrate(unit, axis.corr_unit)
elif axis.extension == "derivate":
unit = get_unit_derivate(unit, axis.corr_unit)
return DataClass(
name=self.name,
unit=unit,
symbol=self.symbol,
axes=Axes,
values=values,
normalizations=self.normalizations.copy(),
is_real=self.is_real,
)
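# --- Hedged usage sketch (kept as comments so this module stays import-safe): a typical
# call to get_data_along on a 2-D time/angle field. The constructor values and the axis
# selection strings ("time", "angle[0]") below are illustrative assumptions, not a
# statement of SciDataTool's documented API.
# import numpy as np
# from SciDataTool import Data1D, DataTime
# Time = Data1D(name="time", unit="s", values=np.linspace(0, 1, 10))
# Angle = Data1D(name="angle", unit="rad", values=np.linspace(0, 2 * np.pi, 20, endpoint=False))
# field = DataTime(name="field", symbol="X", unit="T", axes=[Time, Angle],
#                  values=np.ones((10, 20)))
# sliced = field.get_data_along("time", "angle[0]")  # keep the time axis, take the first angle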
|
py | b41079b03868c6697d8be11005f97ef9dc613aa9 | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Will Thames <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
import os
from pathlib import Path
import ansiblelint.utils as utils
from importlib_metadata import version as get_dist_version
from packaging.version import Version
import pytest
class TestUtils(unittest.TestCase):
def test_tokenize_blank(self):
(cmd, args, kwargs) = utils.tokenize("")
self.assertEqual(cmd, '')
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_tokenize_single_word(self):
(cmd, args, kwargs) = utils.tokenize("vars:")
self.assertEqual(cmd, "vars")
def test_tokenize_string_module_and_arg(self):
(cmd, args, kwargs) = utils.tokenize("hello: a=1")
self.assertEqual(cmd, "hello")
self.assertEqual(kwargs, {"a": "1"})
def test_tokenize_strips_action(self):
(cmd, args, kwargs) = utils.tokenize("action: hello a=1")
self.assertEqual(cmd, "hello")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"a": "1"})
def test_tokenize_more_than_one_arg(self):
(cmd, args, kwargs) = utils.tokenize("action: whatever bobbins x=y z=x c=3")
self.assertEqual(cmd, "whatever")
self.assertEqual(args[0], "bobbins")
self.assertEqual(args[1], "x=y")
self.assertEqual(args[2], "z=x")
self.assertEqual(args[3], "c=3")
def test_tokenize_command_with_args(self):
cmd, args, kwargs = utils.tokenize("action: command chdir=wxy creates=zyx tar xzf zyx.tgz")
self.assertEqual(cmd, "command")
self.assertEqual(args[0], "tar")
self.assertEqual(args[1], "xzf")
self.assertEqual(args[2], "zyx.tgz")
self.assertEqual(kwargs, {'chdir': 'wxy', 'creates': 'zyx'})
def test_normalize_simple_command(self):
task1 = dict(name="hello", action="command chdir=abc echo hello world")
task2 = dict(name="hello", command="chdir=abc echo hello world")
self.assertEqual(
utils.normalize_task(task1, 'tasks.yml'),
utils.normalize_task(task2, 'tasks.yml'))
@pytest.mark.xfail(
Version(get_dist_version('ansible')) >= Version('2.10.dev0') and
Version(get_dist_version('ansible-base')) >= Version('2.10.dev0'),
reason='Post-split Ansible Core Engine does not have '
'the module used in the test playbook.'
' Ref: https://github.com/ansible/ansible-lint/issues/703.'
' Ref: https://github.com/ansible/ansible/pull/68598.',
raises=SystemExit,
strict=True,
)
def test_normalize_complex_command(self):
task1 = dict(name="hello", action={'module': 'ec2',
'region': 'us-east1',
'etc': 'whatever'})
task2 = dict(name="hello", ec2={'region': 'us-east1',
'etc': 'whatever'})
task3 = dict(name="hello", ec2="region=us-east1 etc=whatever")
task4 = dict(name="hello", action="ec2 region=us-east1 etc=whatever")
self.assertEqual(
utils.normalize_task(task1, 'tasks.yml'),
utils.normalize_task(task2, 'tasks.yml'))
self.assertEqual(
utils.normalize_task(task2, 'tasks.yml'),
utils.normalize_task(task3, 'tasks.yml'))
self.assertEqual(
utils.normalize_task(task3, 'tasks.yml'),
utils.normalize_task(task4, 'tasks.yml'))
def test_normalize_args(self):
task1 = {'git': {'version': 'abc'}, 'args': {'repo': 'blah', 'dest': 'xyz'}}
task2 = {'git': {'version': 'abc', 'repo': 'blah', 'dest': 'xyz'}}
task3 = {"git": 'version=abc repo=blah dest=xyz'}
task4 = {"git": None, "args": {'repo': 'blah', 'dest': 'xyz', 'version': 'abc'}}
self.assertEqual(
utils.normalize_task(task1, 'tasks.yml'),
utils.normalize_task(task2, 'tasks.yml'))
self.assertEqual(
utils.normalize_task(task1, 'tasks.yml'),
utils.normalize_task(task3, 'tasks.yml'))
self.assertEqual(
utils.normalize_task(task1, 'tasks.yml'),
utils.normalize_task(task4, 'tasks.yml'))
def test_extract_from_list(self):
block = {
'block': [{'tasks': {'name': 'hello', 'command': 'whoami'}}],
'test_none': None,
'test_string': 'foo',
}
blocks = [block]
test_list = utils.extract_from_list(blocks, ['block'])
test_none = utils.extract_from_list(blocks, ['test_none'])
self.assertEqual(list(block['block']), test_list)
self.assertEqual(list(), test_none)
with self.assertRaises(RuntimeError):
utils.extract_from_list(blocks, ['test_string'])
def test_simple_template(self):
v = "{{ playbook_dir }}"
result = utils.template('/a/b/c', v, dict(playbook_dir='/a/b/c'))
self.assertEqual(result, "/a/b/c")
def test_missing_filter(self):
v = "{{ 'hello' | doesnotexist }}"
result = utils.template('/a/b/c', v, dict(playbook_dir='/a/b/c'))
self.assertEqual(result, "{{ 'hello' | doesnotexist }}")
def test_existing_filter_on_unknown_var(self):
v = "{{ hello | to_json }}"
result = utils.template('/a/b/c', v, dict(playbook_dir='/a/b/c'))
self.assertEqual(result, "{{ hello | to_json }}")
def test_existing_filter_yaml_on_unknown_var(self):
v = "{{ hello | to_nice_yaml }}"
result = utils.template('/a/b/c', v, dict(playbook_dir='/a/b/c'))
self.assertEqual(result, "{{ hello | to_nice_yaml }}")
def test_task_to_str_unicode(self):
task = dict(fail=dict(msg=u"unicode é ô à"))
result = utils.task_to_str(utils.normalize_task(task, 'filename.yml'))
self.assertEqual(result, u"fail msg=unicode é ô à")
def test_normpath_with_path_object(self):
self.assertEqual(
utils.normpath(Path("a/b/../")),
"a")
def test_normpath_with_string(self):
self.assertEqual(
utils.normpath("a/b/../"),
"a")
def test_expand_path_vars(monkeypatch):
test_path = '/test/path'
monkeypatch.setenv('TEST_PATH', test_path)
assert utils.expand_path_vars('~') == os.path.expanduser('~')
assert utils.expand_path_vars('$TEST_PATH') == test_path
def test_expand_paths_vars(monkeypatch):
test_path = '/test/path'
monkeypatch.setenv('TEST_PATH', test_path)
assert utils.expand_paths_vars(['~']) == [os.path.expanduser('~')]
assert utils.expand_paths_vars(['$TEST_PATH']) == [test_path]
|
py | b4107b3a36262f9672afa0f054c44ff4494355d5 | """ Tests for invalid-name checker. """
# pylint: disable=unused-import, no-absolute-import, wrong-import-position
AAA = 24
try:
import collections
except ImportError:
collections = None
aaa = 42 # [invalid-name]
try:
import time
except ValueError:
time = None # [invalid-name]
try:
from sys import argv, executable as python
except ImportError:
argv = 42
python = 24
def test():
""" Shouldn't emit an invalid-name here. """
try:
import re
except ImportError:
re = None
return re
def a(): # [invalid-name]
"""yo"""
|
py | b4107cfbf38660941ff2adf4c1154a164fa2fb82 | """
Author: Dr. John T. Hwang <[email protected]>
This package is distributed under New BSD license.
N-dimensional robot arm problem.
"""
import numpy as np
from six.moves import range
from smt.utils.options_dictionary import OptionsDictionary
from smt.problems.problem import Problem
from smt.problems.reduced_problem import ReducedProblem
from smt.problems.robot_arm import RobotArm
class NdimRobotArm(Problem):
def __init__(self, ndim=1, w=0.2):
self.problem = ReducedProblem(
RobotArm(ndim=2 * (ndim + 1)), np.arange(3, 2 * (ndim + 1), 2), w=w
)
self.options = OptionsDictionary()
self.options.declare("ndim", ndim, types=int)
self.options.declare("return_complex", False, types=bool)
self.options.declare("name", "NdimRobotArm", types=str)
self.xlimits = self.problem.xlimits
def _evaluate(self, x, kx):
return self.problem._evaluate(x, kx)
|
py | b4107d0ff5c778b7343d6d5366ce70f39d6cb344 | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
DIO = 13
CLK = 12
STB = 11
LSBFIRST = 0
MSBFIRST = 1
tmp = 0
def _shiftOut(dataPin, clockPin, bitOrder, val):
for i in range(8):
if bitOrder == LSBFIRST:
GPIO.output(dataPin, val & (1 << i))
else:
GPIO.output(dataPin, val & (1 << (7 -i)))
GPIO.output(clockPin, True)
time.sleep(0.000001)
GPIO.output(clockPin, False)
time.sleep(0.000001)
def sendCommand(cmd):
GPIO.output(STB, False)
_shiftOut(DIO, CLK, LSBFIRST, cmd)
GPIO.output(STB, True)
def TM1638_init():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(DIO, GPIO.OUT)
GPIO.setup(CLK, GPIO.OUT)
GPIO.setup(STB, GPIO.OUT)
sendCommand(0x8f)
def numberDisplay(num):
digits = [0x3f,0x06,0x5b,0x4f,0x66,0x6d,0x7d,0x07,0x7f,0x6f]
sendCommand(0x40)
GPIO.output(STB, False)
_shiftOut(DIO, CLK, LSBFIRST, 0xc0)
    _shiftOut(DIO, CLK, LSBFIRST, digits[num//1000%10])
    _shiftOut(DIO, CLK, LSBFIRST, 0x00)
    _shiftOut(DIO, CLK, LSBFIRST, digits[num//100%10])
    _shiftOut(DIO, CLK, LSBFIRST, 0x00)
    _shiftOut(DIO, CLK, LSBFIRST, digits[num//10%10])
_shiftOut(DIO, CLK, LSBFIRST, 0x00)
_shiftOut(DIO, CLK, LSBFIRST, digits[num%10])
_shiftOut(DIO, CLK, LSBFIRST, 0x00)
GPIO.output(STB, True)
def numberDisplay_dec(num):
digits = [0x3f,0x06,0x5b,0x4f,0x66,0x6d,0x7d,0x07,0x7f,0x6f]
integer = 0
decimal = 0
pro = int(num * 100)
integer = int(pro / 100)
decimal = int(pro % 100)
sendCommand(0x40)
GPIO.output(STB, False)
_shiftOut(DIO, CLK, LSBFIRST, 0xc0)
    _shiftOut(DIO, CLK, LSBFIRST, digits[integer//10])
_shiftOut(DIO, CLK, LSBFIRST, 0x00)
_shiftOut(DIO, CLK, LSBFIRST, digits[integer%10] | 0x80)
_shiftOut(DIO, CLK, LSBFIRST, 0x00)
    _shiftOut(DIO, CLK, LSBFIRST, digits[decimal//10])
_shiftOut(DIO, CLK, LSBFIRST, 0x00)
_shiftOut(DIO, CLK, LSBFIRST, digits[decimal%10])
_shiftOut(DIO, CLK, LSBFIRST, 0x00)
GPIO.output(STB, True)
try:
TM1638_init()
numberDisplay(1234)
time.sleep(4) # 4s
numberDisplay_dec(56.78)
time.sleep(4) # 4s
while True:
numberDisplay(tmp)
tmp += 1
if tmp > 9999:
tmp = 0
time.sleep(0.05)
except KeyboardInterrupt:
GPIO.cleanup()
|
py | b4107d19f52341e67500e924b377abd99b3006fc | from common.numpy_fast import interp
from math import atan2, sqrt
from common.realtime import DT_DMON
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
from cereal import car
EventName = car.CarEvent.EventName
# ******************************************************************************************
# NOTE: To fork maintainers.
# Disabling or nerfing safety features may get you and your users banned from our servers.
# We recommend that you do not change these numbers from the defaults.
# ******************************************************************************************
_AWARENESS_TIME = 350. # passive wheel touch total timeout
_AWARENESS_PRE_TIME_TILL_TERMINAL = 12.
_AWARENESS_PROMPT_TIME_TILL_TERMINAL = 6.
_DISTRACTED_TIME = 110.
_DISTRACTED_PRE_TIME_TILL_TERMINAL = 8.
_DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
_FACE_THRESHOLD = 0.6
_EYE_THRESHOLD = 0.6
_SG_THRESHOLD = 0.5
_BLINK_THRESHOLD = 0.5
_BLINK_THRESHOLD_SLACK = 0.65
_BLINK_THRESHOLD_STRICT = 0.5
_PITCH_WEIGHT = 1.35 # pitch matters a lot more
_POSESTD_THRESHOLD = 0.14
_METRIC_THRESHOLD = 0.4
_METRIC_THRESHOLD_SLACK = 0.55
_METRIC_THRESHOLD_STRICT = 0.4
_PITCH_POS_ALLOWANCE = 0.12 # rad, to not be too sensitive on positive pitch
_PITCH_NATURAL_OFFSET = 0.02 # people don't seem to look straight when they drive relaxed, rather a bit up
_YAW_NATURAL_OFFSET = 0.08 # people don't seem to look straight when they drive relaxed, rather a bit to the right (center of car)
_HI_STD_TIMEOUT = 5
_HI_STD_FALLBACK_TIME = 10 # fall back to wheel touch if model is uncertain for a long time
_DISTRACTED_FILTER_TS = 0.25 # 0.6Hz
_POSE_CALIB_MIN_SPEED = 13 # 30 mph
_POSE_OFFSET_MIN_COUNT = 600 # valid data counts before calibration completes, 1 seg is 600 counts
_POSE_OFFSET_MAX_COUNT = 3600 # stop deweighting new data after 6 min, aka "short term memory"
_RECOVERY_FACTOR_MAX = 5. # relative to minus step change
_RECOVERY_FACTOR_MIN = 1.25 # relative to minus step change
MAX_TERMINAL_ALERTS = 3 # not allowed to engage after 3 terminal alerts
MAX_TERMINAL_DURATION = 300 # 30s
# model output refers to center of cropped image, so need to apply the x displacement offset
RESIZED_FOCAL = 320.0
H, W, FULL_W = 320, 160, 426
class DistractedType():
NOT_DISTRACTED = 0
BAD_POSE = 1
BAD_BLINK = 2
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib, is_rhd):
# the output of these angles are in device frame
# so from driver's perspective, pitch is up and yaw is right
pitch_net = angles_desc[0]
yaw_net = angles_desc[1]
roll_net = angles_desc[2]
face_pixel_position = ((pos_desc[0] + .5)*W - W + FULL_W, (pos_desc[1]+.5)*H)
yaw_focal_angle = atan2(face_pixel_position[0] - FULL_W//2, RESIZED_FOCAL)
pitch_focal_angle = atan2(face_pixel_position[1] - H//2, RESIZED_FOCAL)
roll = roll_net
pitch = pitch_net + pitch_focal_angle
yaw = -yaw_net + yaw_focal_angle
# no calib for roll
pitch -= rpy_calib[1]
yaw -= rpy_calib[2] * (1 - 2 * int(is_rhd)) # lhd -> -=, rhd -> +=
return roll, pitch, yaw
class DriverPose():
def __init__(self):
self.yaw = 0.
self.pitch = 0.
self.roll = 0.
self.yaw_std = 0.
self.pitch_std = 0.
self.roll_std = 0.
self.pitch_offseter = RunningStatFilter(max_trackable=_POSE_OFFSET_MAX_COUNT)
self.yaw_offseter = RunningStatFilter(max_trackable=_POSE_OFFSET_MAX_COUNT)
self.low_std = True
self.cfactor = 1.
class DriverBlink():
def __init__(self):
self.left_blink = 0.
self.right_blink = 0.
self.cfactor = 1.
class DriverStatus():
def __init__(self):
self.pose = DriverPose()
self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT
self.blink = DriverBlink()
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
self.driver_distracted = False
self.driver_distraction_filter = FirstOrderFilter(0., _DISTRACTED_FILTER_TS, DT_DMON)
self.face_detected = False
self.terminal_alert_cnt = 0
self.terminal_time = 0
self.step_change = 0.
self.active_monitoring_mode = True
self.hi_stds = 0
self.hi_std_alert_enabled = True
self.threshold_prompt = _DISTRACTED_PROMPT_TIME_TILL_TERMINAL / _DISTRACTED_TIME
self.is_rhd_region = False
self.is_rhd_region_checked = False
self._set_timers(active_monitoring=True)
def _set_timers(self, active_monitoring):
if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
if active_monitoring:
self.step_change = DT_DMON / _DISTRACTED_TIME
else:
self.step_change = 0.
return # no exploit after orange alert
elif self.awareness <= 0.:
return
if active_monitoring:
# when falling back from passive mode to active mode, reset awareness to avoid false alert
if not self.active_monitoring_mode:
self.awareness_passive = self.awareness
self.awareness = self.awareness_active
self.threshold_pre = _DISTRACTED_PRE_TIME_TILL_TERMINAL / _DISTRACTED_TIME
self.threshold_prompt = _DISTRACTED_PROMPT_TIME_TILL_TERMINAL / _DISTRACTED_TIME
self.step_change = DT_DMON / _DISTRACTED_TIME
self.active_monitoring_mode = True
else:
if self.active_monitoring_mode:
self.awareness_active = self.awareness
self.awareness = self.awareness_passive
self.threshold_pre = _AWARENESS_PRE_TIME_TILL_TERMINAL / _AWARENESS_TIME
self.threshold_prompt = _AWARENESS_PROMPT_TIME_TILL_TERMINAL / _AWARENESS_TIME
self.step_change = DT_DMON / _AWARENESS_TIME
self.active_monitoring_mode = False
def _is_driver_distracted(self, pose, blink):
if not self.pose_calibrated:
pitch_error = pose.pitch - _PITCH_NATURAL_OFFSET
yaw_error = pose.yaw - _YAW_NATURAL_OFFSET
else:
pitch_error = pose.pitch - self.pose.pitch_offseter.filtered_stat.mean()
yaw_error = pose.yaw - self.pose.yaw_offseter.filtered_stat.mean()
# positive pitch allowance
if pitch_error > 0.:
pitch_error = max(pitch_error - _PITCH_POS_ALLOWANCE, 0.)
pitch_error *= _PITCH_WEIGHT
pose_metric = sqrt(yaw_error**2 + pitch_error**2)
if pose_metric > _METRIC_THRESHOLD*pose.cfactor:
return DistractedType.BAD_POSE
elif (blink.left_blink + blink.right_blink)*0.5 > _BLINK_THRESHOLD*blink.cfactor:
return DistractedType.BAD_BLINK
else:
return DistractedType.NOT_DISTRACTED
def set_policy(self, model_data):
ep = min(model_data.meta.engagedProb, 0.8) / 0.8
self.pose.cfactor = interp(ep, [0, 0.5, 1], [_METRIC_THRESHOLD_STRICT, _METRIC_THRESHOLD, _METRIC_THRESHOLD_SLACK])/_METRIC_THRESHOLD
self.blink.cfactor = interp(ep, [0, 0.5, 1], [_BLINK_THRESHOLD_STRICT, _BLINK_THRESHOLD, _BLINK_THRESHOLD_SLACK])/_BLINK_THRESHOLD
def get_pose(self, driver_state, cal_rpy, car_speed, op_engaged):
# 10 Hz
if len(driver_state.faceOrientation) == 0 or len(driver_state.facePosition) == 0 or len(driver_state.faceOrientationStd) == 0 or len(driver_state.facePositionStd) == 0:
return
self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_state.faceOrientation, driver_state.facePosition, cal_rpy, self.is_rhd_region)
self.pose.pitch_std = driver_state.faceOrientationStd[0]
self.pose.yaw_std = driver_state.faceOrientationStd[1]
# self.pose.roll_std = driver_state.faceOrientationStd[2]
model_std_max = max(self.pose.pitch_std, self.pose.yaw_std)
self.pose.low_std = model_std_max < _POSESTD_THRESHOLD
self.blink.left_blink = driver_state.leftBlinkProb * (driver_state.leftEyeProb > _EYE_THRESHOLD) * (driver_state.sgProb < _SG_THRESHOLD)
self.blink.right_blink = driver_state.rightBlinkProb * (driver_state.rightEyeProb > _EYE_THRESHOLD) * (driver_state.sgProb < _SG_THRESHOLD)
self.face_detected = driver_state.faceProb > _FACE_THRESHOLD and \
abs(driver_state.facePosition[0]) <= 0.4 and abs(driver_state.facePosition[1]) <= 0.45
self.driver_distracted = self._is_driver_distracted(self.pose, self.blink) > 0
# first order filters
self.driver_distraction_filter.update(self.driver_distracted)
# update offseter
# only update when driver is actively driving the car above a certain speed
if self.face_detected and car_speed > _POSE_CALIB_MIN_SPEED and self.pose.low_std and (not op_engaged or not self.driver_distracted):
self.pose.pitch_offseter.push_and_update(self.pose.pitch)
self.pose.yaw_offseter.push_and_update(self.pose.yaw)
self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT
is_model_uncertain = self.hi_stds * DT_DMON > _HI_STD_FALLBACK_TIME
self._set_timers(self.face_detected and not is_model_uncertain)
if self.face_detected and not self.pose.low_std:
if not is_model_uncertain:
self.step_change *= min(1.0, max(0.6, 1.6*(model_std_max-0.5)*(model_std_max-2)))
self.hi_stds += 1
elif self.face_detected and self.pose.low_std:
self.hi_stds = 0
def update(self, events, driver_engaged, ctrl_active, standstill):
if (driver_engaged and self.awareness > 0) or not ctrl_active:
# reset only when on disengagement if red reached
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
return
driver_attentive = self.driver_distraction_filter.x < 0.37
awareness_prev = self.awareness
if self.face_detected and self.hi_stds * DT_DMON > _HI_STD_TIMEOUT and self.hi_std_alert_enabled:
events.add(EventName.driverMonitorLowAcc)
self.hi_std_alert_enabled = False # only showed once until orange prompt resets it
if (driver_attentive and self.face_detected and self.pose.low_std and self.awareness > 0):
# only restore awareness when paying attention and alert is not red
self.awareness = min(self.awareness + ((_RECOVERY_FACTOR_MAX-_RECOVERY_FACTOR_MIN)*(1.-self.awareness)+_RECOVERY_FACTOR_MIN)*self.step_change, 1.)
if self.awareness == 1.:
self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
# don't display alert banner when awareness is recovering and has cleared orange
if self.awareness > self.threshold_prompt:
return
# should always be counting if distracted unless at standstill and reaching orange
if (not (self.face_detected and self.hi_stds * DT_DMON <= _HI_STD_FALLBACK_TIME) or (self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected)) and \
not (standstill and self.awareness - self.step_change <= self.threshold_prompt):
self.awareness = max(self.awareness - self.step_change, -0.1)
alert = None
if self.awareness <= 0.:
# terminal red alert: disengagement required
alert = EventName.driverDistracted if self.active_monitoring_mode else EventName.driverUnresponsive
self.hi_std_alert_enabled = True
self.terminal_time += 1
if awareness_prev > 0.:
self.terminal_alert_cnt += 1
elif self.awareness <= self.threshold_prompt:
# prompt orange alert
alert = EventName.promptDriverDistracted if self.active_monitoring_mode else EventName.promptDriverUnresponsive
elif self.awareness <= self.threshold_pre:
# pre green alert
alert = EventName.preDriverDistracted if self.active_monitoring_mode else EventName.preDriverUnresponsive
if alert is not None:
events.add(alert)
|
py | b4107e3b9a4cd608499efecf2000f94610b1a66b | """Bethe lattice with infinite coordination number.
This is in fact not a real lattice, but a tree. It corresponds to a semi-circular
DOS.
:half_bandwidth: The half_bandwidth corresponds to a scaled nearest neighbor
hopping of `t=D/2`
"""
import numpy as np
from mpmath import mp
from gftool.precision import PRECISE_TYPES as _PRECISE_TYPES
def gf_z(z, half_bandwidth):
r"""Local Green's function of Bethe lattice for infinite coordination number.
.. math:: G(z) = 2(z - s\sqrt{z^2 - D^2})/D^2
where :math:`D` is the half bandwidth and :math:`s=sgn[ℑ{ξ}]`. See
[georges1996]_.
Parameters
----------
z : complex array_like or complex
Green's function is evaluated at complex frequency `z`
half_bandwidth : float
Half-bandwidth of the DOS of the Bethe lattice.
The `half_bandwidth` corresponds to the nearest neighbor hopping `t=D/2`
Returns
-------
gf_z : complex np.ndarray or complex
Value of the Bethe Green's function
References
----------
.. [georges1996] Georges et al., Rev. Mod. Phys. 68, 13 (1996)
https://doi.org/10.1103/RevModPhys.68.13
Examples
--------
>>> ww = np.linspace(-1.5, 1.5, num=500)
>>> gf_ww = gt.lattice.bethe.gf_z(ww, half_bandwidth=1)
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(ww, gf_ww.real, label=r"$\Re G$")
>>> _ = plt.plot(ww, gf_ww.imag, '--', label=r"$\Im G$")
>>> _ = plt.xlabel(r"$\omega/D$")
>>> _ = plt.ylabel(r"$G*D$")
>>> _ = plt.axhline(0, color='black', linewidth=0.8)
>>> _ = plt.xlim(left=ww.min(), right=ww.max())
>>> _ = plt.legend()
>>> plt.show()
"""
z_rel = np.array(z / half_bandwidth, dtype=np.complex256)
try:
complex_pres = np.complex256 if z.dtype in _PRECISE_TYPES else complex
except AttributeError:
complex_pres = complex
gf_z = 2./half_bandwidth*z_rel*(1 - np.sqrt(1 - z_rel**-2))
return gf_z.astype(dtype=complex_pres, copy=False)
def gf_d1_z(z, half_bandwidth):
"""First derivative of local Green's function of Bethe lattice for infinite coordination number.
Parameters
----------
z : complex array_like or complex
Green's function is evaluated at complex frequency `z`
half_bandwidth : float
half-bandwidth of the DOS of the Bethe lattice
The `half_bandwidth` corresponds to the nearest neighbor hopping `t=D/2`
Returns
-------
gf_d1_z : complex np.ndarray or complex
Value of the derivative of the Green's function
See Also
--------
gftool.lattice.bethe.gf_z
"""
z_rel_inv = np.array(half_bandwidth / z, dtype=np.complex256)
try:
complex_pres = np.complex256 if z.dtype in _PRECISE_TYPES else complex
except AttributeError:
complex_pres = complex
sqrt = np.sqrt(1 - z_rel_inv**2)
gf_d1 = 2. / half_bandwidth**2 * (1 - 1/sqrt)
return gf_d1.astype(dtype=complex_pres, copy=False)
def gf_d2_z(z, half_bandwidth):
"""Second derivative of local Green's function of Bethe lattice for infinite coordination number.
Parameters
----------
z : complex array_like or complex
Green's function is evaluated at complex frequency `z`
half_bandwidth : float
half-bandwidth of the DOS of the Bethe lattice
The `half_bandwidth` corresponds to the nearest neighbor hopping `t=D/2`
Returns
-------
gf_d2_z : complex np.ndarray or complex
Value of the Green's function
See Also
--------
gftool.lattice.bethe.gf_z
"""
z_rel = np.array(z / half_bandwidth, dtype=np.complex256)
try:
complex_pres = np.complex256 if z.dtype in _PRECISE_TYPES else complex
except AttributeError:
complex_pres = complex
sqrt = np.sqrt(1 - z_rel**-2)
gf_d2 = 2. / half_bandwidth**3 * z_rel * sqrt / (1 - z_rel**2)**2
return gf_d2.astype(dtype=complex_pres, copy=False)
def gf_z_inv(gf, half_bandwidth):
r"""Inverse of local Green's function of Bethe lattice for infinite coordination number.
.. math:: R(G) = (D/2)^2 G + 1/G
where :math:`R(z) = G^{-1}(z)` is the inverse of the Green's function.
Parameters
----------
gf : complex array_like or complex
Value of the local Green's function.
half_bandwidth : float
Half-bandwidth of the DOS of the Bethe lattice.
The `half_bandwidth` corresponds to the nearest neighbor hopping `t=D/2`
Returns
-------
z : complex np.ndarray or complex
The inverse of the Bethe Green's function `gf_z(gf_z_inv(g, D), D)=g`.
See Also
--------
gftool.lattice.bethe.gf_z
References
----------
.. [georges1996] Georges et al., Rev. Mod. Phys. 68, 13 (1996)
https://doi.org/10.1103/RevModPhys.68.13
Examples
--------
>>> ww = np.linspace(-1.5, 1.5, num=500) + 1e-4j
>>> gf_ww = gt.lattice.bethe.gf_z(ww, half_bandwidth=1)
>>> np.allclose(ww, gt.lattice.bethe.gf_z_inv(gf_ww, half_bandwidth=1))
True
"""
return (0.5 * half_bandwidth)**2 * gf + 1./gf
def hilbert_transform(xi, half_bandwidth):
r"""Hilbert transform of non-interacting DOS of the Bethe lattice.
The Hilbert transform is defined as:
.. math:: \tilde{D}(ξ) = ∫_{-∞}^{∞}dϵ \frac{DOS(ϵ)}{ξ − ϵ}
The lattice Hilbert transform is the same as the non-interacting Green's
function.
Parameters
----------
xi : complex array_like or complex
Point at which the Hilbert transform is evaluated
half_bandwidth : float
half-bandwidth of the DOS of the Bethe lattice
Returns
-------
hilbert_transform : complex np.ndarray or complex
Hilbert transform of `xi`.
Notes
-----
Relation between nearest neighbor hopping `t` and half-bandwidth `D`:
.. math:: 2t = D
See Also
--------
gftool.lattice.bethe.gf_z
"""
return gf_z(xi, half_bandwidth)
def dos(eps, half_bandwidth):
r"""DOS of non-interacting Bethe lattice for infinite coordination number.
Parameters
----------
eps : float array_like or float
DOS is evaluated at points `eps`.
half_bandwidth : float
Half-bandwidth of the DOS, DOS(| `eps` | > `half_bandwidth`) = 0.
The `half_bandwidth` corresponds to the nearest neighbor hopping `t=D/2`
Returns
-------
dos : float np.ndarray or float
The value of the DOS.
See Also
--------
gftool.lattice.bethe.dos_mp : multi-precision version suitable for integration
References
----------
.. [economou2006] Economou, E. N. Green's Functions in Quantum Physics.
Springer, 2006.
Examples
--------
>>> eps = np.linspace(-1.1, 1.1, num=500)
>>> dos = gt.lattice.bethe.dos(eps, half_bandwidth=1)
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(eps, dos)
>>> _ = plt.xlabel(r"$\epsilon/D$")
>>> _ = plt.ylabel(r"DOS * $D$")
>>> _ = plt.axvline(0, color='black', linewidth=0.8)
>>> _ = plt.ylim(bottom=0)
>>> _ = plt.xlim(left=eps.min(), right=eps.max())
>>> plt.show()
"""
eps_rel = np.asarray(eps / half_bandwidth)
dos = np.zeros_like(eps_rel)
nonzero = (abs(eps_rel) < 1) | np.iscomplex(eps)
dos[nonzero] = 2. / (np.pi*half_bandwidth) * np.sqrt(1 - eps_rel[nonzero]**2)
return dos
# ∫dϵ ϵ^m DOS(ϵ) for half-bandwidth D=1
# from: integral of dos_mp with mp.workdps(100)
# for m in range(0, 22, 2):
# with mp.workdps(100):
# print(mp.quad(lambda eps: 2 * eps**m * dos_mp(eps), [0, 1]))
# rational numbers obtained by mp.identify
dos_moment_coefficients = {
2: 0.25,
4: 0.125,
6: 5/64,
8: 7/128,
10: 21/512,
12: 0.0322265625,
14: 0.02618408203125,
16: 0.021820068359375,
18: 0.01854705810546875,
20: 0.016017913818359375,
}
def dos_moment(m, half_bandwidth):
"""Calculate the `m` th moment of the Bethe DOS.
The moments are defined as :math:`∫dϵ ϵ^m DOS(ϵ)`.
Parameters
----------
m : int
The order of the moment.
half_bandwidth : float
Half-bandwidth of the DOS of the Bethe lattice.
Returns
-------
dos_moment : float
The `m` th moment of the Bethe DOS.
Raises
------
NotImplementedError
Currently only implemented for a few specific moments `m`.
See Also
--------
gftool.lattice.bethe.dos
"""
if m % 2: # odd moments vanish due to symmetry
return 0
try:
return dos_moment_coefficients[m] * half_bandwidth**m
except KeyError as keyerr:
raise NotImplementedError('Calculation of arbitrary moments not implemented.') from keyerr
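# --- Hedged sanity-check sketch (kept as comments, not part of the library): the second
# moment of the semicircular DOS is D**2/4, so a crude trapezoidal integral of
# eps**2 * dos(eps, 1) should reproduce dos_moment(2, 1) == 0.25.
# eps_chk = np.linspace(-1, 1, 10001)
# np.trapz(eps_chk**2 * dos(eps_chk, half_bandwidth=1), eps_chk)  # ≈ 0.25 == dos_moment(2, 1)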
def dos_mp(eps, half_bandwidth=1):
r"""Multi-precision DOS of non-interacting Bethe lattice for infinite coordination number.
This function is particularly suited to calculate integrals of the form
:math:`∫dϵ DOS(ϵ)f(ϵ)`.
Parameters
----------
eps : mpmath.mpf or mpf_like
DOS is evaluated at points `eps`.
half_bandwidth : mpmath.mpf or mpf_like
Half-bandwidth of the DOS, DOS(| `eps` | > `half_bandwidth`) = 0.
The `half_bandwidth` corresponds to the nearest neighbor hopping `t=D/2`
Returns
-------
dos_mp : mpmath.mpf
The value of the DOS.
See Also
--------
gftool.lattice.bethe.dos : vectorized version suitable for array evaluations
References
----------
.. [economou2006] Economou, E. N. Green's Functions in Quantum Physics.
Springer, 2006.
Examples
--------
Calculate integrals:
>>> from mpmath import mp
>>> mp.quad(gt.lattice.bethe.dos_mp, [-1, 1])
mpf('1.0')
>>> eps = np.linspace(-1.1, 1.1, num=500)
>>> dos_mp = [gt.lattice.bethe.dos_mp(ee, half_bandwidth=1) for ee in eps]
>>> dos_mp = np.array(dos_mp, dtype=np.float64)
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(eps, dos_mp)
>>> _ = plt.xlabel(r"$\epsilon/D$")
>>> _ = plt.ylabel(r"DOS * $D$")
>>> _ = plt.axvline(0, color='black', linewidth=0.8)
>>> _ = plt.ylim(bottom=0)
>>> _ = plt.xlim(left=eps.min(), right=eps.max())
>>> plt.show()
"""
eps, half_bandwidth = mp.mpf(eps), mp.mpf(half_bandwidth)
if mp.fabs(eps) > half_bandwidth:
return mp.mpf('0')
return 2 / (mp.pi * half_bandwidth) * mp.sqrt(-mp.powm1(eps / half_bandwidth, mp.mpf('2')))
|
py | b4107e79408e65b9961a869a52e7c0cf2d0f638a | from sympy.physics.quantum.gate import H, X
from sympy.physics.quantum.qubit import Qubit
from quantpy.sympy.expr_extension import sympy_expr_add_rshift, sympy_expr_remove_rshift, sympy_expr_toggle_rshift
from quantpy.sympy.expr_extension import sympy_expr_add_operators
def test_sympy_expr_add_operators():
sympy_expr_add_operators()
assert H(0)>>H(1)>>H(2) == H(2)*H(1)*H(0)
def test_sympy_expr_toggle_rshift():
sympy_expr_remove_rshift()
sympy_expr_toggle_rshift()
assert H(0)>>H(1)>>H(2) == H(2)*H(1)*H(0)
sympy_expr_toggle_rshift()
try:
H(0)>>H(1)>>H(2)
assert False
except TypeError as ex:
assert True
def test_single_rshift_as_append_circuit():
sympy_expr_add_rshift()
h = H(0)
x = X(0)
assert ( h >> x) == x * h
assert (x >> h) == h * x
def test_qubit_rshift_as_input_to_circuit():
sympy_expr_add_rshift()
q = Qubit(0)
h = H(0)
assert (q >> h) == h * q
assert (h >> q) == q * h
def test_combine_operator_and_qubit_with_rshifts_to_make_circuit():
sympy_expr_add_rshift()
q = Qubit(0)
h = H(0)
x = X(0)
assert (q >> h >> x) == x * h * q
|
py | b4107ecabddd406a25b21f94bf7d0447150e5525 | import torch
class AlphaFoldLRScheduler(torch.optim.lr_scheduler._LRScheduler):
""" Implements the learning rate schedule defined in the AlphaFold 2
supplement. A linear warmup is followed by a plateau at the maximum
learning rate and then exponential decay.
Note that the initial learning rate of the optimizer in question is
ignored; use this class' base_lr parameter to specify the starting
point of the warmup.
"""
def __init__(self,
optimizer,
last_epoch: int = -1,
verbose: bool = False,
base_lr: float = 0.,
max_lr: float = 0.001,
warmup_no_steps: int = 1000,
start_decay_after_n_steps: int = 50000,
decay_every_n_steps: int = 50000,
decay_factor: float = 0.95,
):
step_counts = {
"warmup_no_steps": warmup_no_steps,
"start_decay_after_n_steps": start_decay_after_n_steps,
}
for k,v in step_counts.items():
if(v < 0):
raise ValueError(f"{k} must be nonnegative")
if(warmup_no_steps > start_decay_after_n_steps):
raise ValueError(
"warmup_no_steps must not exceed start_decay_after_n_steps"
)
self.optimizer = optimizer
self.last_epoch = last_epoch
self.verbose = verbose
self.base_lr = base_lr
self.max_lr = max_lr
self.warmup_no_steps = warmup_no_steps
self.start_decay_after_n_steps = start_decay_after_n_steps
self.decay_every_n_steps = decay_every_n_steps
self.decay_factor = decay_factor
super(AlphaFoldLRScheduler, self).__init__(
optimizer,
last_epoch=last_epoch,
verbose=verbose,
)
def state_dict(self):
state_dict = {
k:v for k,v in self.__dict__.items() if k not in ["optimizer"]
}
return state_dict
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def get_lr(self):
if(not self._get_lr_called_within_step):
raise RuntimeError(
"To get the last learning rate computed by the scheduler, use "
"get_last_lr()"
)
step_no = self.last_epoch
if(step_no <= self.warmup_no_steps):
lr = self.base_lr + (step_no / self.warmup_no_steps) * self.max_lr
elif(step_no > self.start_decay_after_n_steps):
steps_since_decay = step_no - self.start_decay_after_n_steps
exp = (steps_since_decay // self.decay_every_n_steps) + 1
lr = self.max_lr * (self.decay_factor ** exp)
else: # plateau
lr = self.max_lr
return [lr for group in self.optimizer.param_groups]
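# --- Hedged usage sketch: drive the scheduler with a toy optimizer. The model, optimizer
# and hyper-parameters below are illustrative assumptions; only AlphaFoldLRScheduler itself
# comes from this module.
if __name__ == "__main__":
    model = torch.nn.Linear(8, 8)                     # toy module, illustrative only
    optimizer = torch.optim.Adam(model.parameters())  # its initial lr is ignored
    scheduler = AlphaFoldLRScheduler(
        optimizer, max_lr=0.001, warmup_no_steps=1000
    )
    for _ in range(3):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())                    # lr climbing linearly during warmup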
|
py | b4107eeb0bd3cd5fa3e25c77a479c56cce575ed5 | # coding=utf-8
# ------------------------------------------------------------------------------
#
# SHA-512-BASED FEISTEL CIPHER
# by Toni Mattis
#
# Feistel Function: SHA-512(Block || Key)
# Key Size: Fully Dynamic
# Block Size: 1024 Bits
# Rounds: User-Specified
#
# ------------------------------------------------------------------------------
from hashlib import sha512
BPOS = tuple(range(64))
def enc_block(block, key, rounds=16):
x = block[:64]
y = block[64:]
for i in xrange(rounds):
h = sha512(x + key).digest()
y = ''.join([chr(ord(y[k]) ^ ord(h[k])) for k in BPOS])
h = sha512(y + key).digest()
x = ''.join([chr(ord(x[k]) ^ ord(h[k])) for k in BPOS])
return x + y
def dec_block(block, key, rounds=16):
x = block[:64]
y = block[64:]
for i in xrange(rounds):
h = sha512(y + key).digest()
x = ''.join([chr(ord(x[k]) ^ ord(h[k])) for k in BPOS])
h = sha512(x + key).digest()
y = ''.join([chr(ord(y[k]) ^ ord(h[k])) for k in BPOS])
return x + y
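# --- Hedged usage sketch (assumes Python 2 byte-string semantics, matching the code above):
# encrypt one 1024-bit block and check that decryption round-trips it. Key and plaintext
# values are illustrative only.
if __name__ == '__main__':
    key = 'correct horse battery staple'
    block = 'A' * 128                      # exactly one 1024-bit block
    ct = enc_block(block, key, rounds=16)
    assert dec_block(ct, key, rounds=16) == block
    print('round-trip OK')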
|
py | b4107ef306e3eada32a31f2e87ac3a25538af6e7 | #{{{ Imports
import gtrace.beam as beam
import gtrace.optcomp as opt
import gtrace.draw as draw
import gtrace.draw.renderer as renderer
#}}}
#{{{ Author and License Infomation
#Copyright (c) 2011-2021, Yoichi Aso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Yoichi Aso"
__copyright__ = "Copyright 2011-2021, Yoichi Aso"
__credits__ = ["Yoichi Aso"]
__license__ = "BSD"
__version__ = "0.2.1"
__maintainer__ = "Yoichi Aso"
__email__ = "[email protected]"
__status__ = "Beta"
#}}}
#{{{ Draw optical system
def drawOptSys(optList, beamList, filename, fontSize=False):
d = draw.Canvas()
d.unit = 'm'
d.add_layer("main_beam", color=(255,0,0))
d.add_layer("main_beam_width", color=(255,0,255))
d.add_layer("stray_beam", color=(0,255,0))
d.add_layer("stray_beam_width", color=(0,255,255))
for b in beamList:
if b.stray_order > 0:
b.layer = 'stray_beam'
sigma = 1.0
drawWidth=False
else:
b.layer = 'main_beam'
sigma = 3.0
drawWidth=True
b.draw(d, sigma=sigma, drawWidth=drawWidth, drawPower=True, drawName=True, fontSize=fontSize)
drawAllOptics(d, optList, drawName=True)
    renderer.renderDXF(d, filename)
#}}}
#{{{ Draw all beams
def drawAllBeams(d, beamList, sigma=3.0, drawWidth=True, drawPower=False,
drawROC=False, drawGouy=False, drawOptDist=False, layer=None, mode='x',
fontSize=0.01):
for ii in range(len(beamList)):
if layer is not None:
beamList[ii].layer = layer
beamList[ii].draw(d, sigma=sigma, mode=mode, drawWidth=drawWidth, drawPower=drawPower,
drawROC=drawROC, drawGouy=drawGouy, drawOptDist=drawOptDist,
fontSize=fontSize)
#}}}
#{{{ Draw all optics
def drawAllOptics(d, opticsList, drawName=True, layer=None):
for ii in range(len(opticsList)):
if layer is not None:
opticsList[ii].layer = layer
opticsList[ii].draw(d, drawName=drawName)
#}}}
#{{{ Translate all
def transAll(objList, transVect):
for ii in range(len(objList)):
objList[ii].translate(transVect)
#}}}
#{{{ Rotata all
def rotateAll(objList, angle, center):
for ii in range(len(objList)):
objList[ii].rotate(angle, center)
#}}}
|
py | b4107ffdaf975141850586dc3725a67e5aca048d | """
evfuncs
Python implementations of functions used with EvTAF and evsonganaly.m
adapated from hybrid-vocal-classifier
https://github.com/NickleDave/hybrid-vocal-classifier
under BSD license
https://github.com/NickleDave/hybrid-vocal-classifier/blob/master/LICENSE
"""
import os
import numpy as np
import scipy.signal
from scipy.io import loadmat
def readrecf(filename):
"""
reads .rec files output by EvTAF
"""
rec_dict = {}
with open(filename, 'r') as recfile:
line_tmp = ""
while 1:
if line_tmp == "":
line = recfile.readline()
else:
line = line_tmp
line_tmp = ""
if line == "": # if End Of File
break
elif line == "\n": # if blank line
continue
elif "Catch" in line:
ind = line.find('=')
rec_dict['iscatch'] = line[ind + 1:]
elif "Chans" in line:
ind = line.find('=')
rec_dict['num_channels'] = int(line[ind + 1:])
elif "ADFREQ" in line:
ind = line.find('=')
try:
rec_dict['sample_freq'] = int(line[ind + 1:])
except ValueError:
rec_dict['sample_freq'] = float(line[ind + 1:])
elif "Samples" in line:
ind = line.find('=')
rec_dict['num_samples'] = int(line[ind + 1:])
elif "T After" in line:
ind = line.find('=')
rec_dict['time_after'] = float(line[ind + 1:])
elif "T Before" in line:
ind = line.find('=')
                rec_dict['time_before'] = float(line[ind + 1:])
elif "Output Sound File" in line:
ind = line.find('=')
rec_dict['outfile'] = line[ind + 1:]
elif "Thresholds" in line:
th_list = []
while 1:
line = recfile.readline()
if line == "":
break
try:
th_list.append(float(line))
except ValueError: # because we reached next section
line_tmp = line
break
rec_dict['thresholds'] = th_list
if line == "":
break
elif "Feedback information" in line:
fb_dict = {}
while 1:
line = recfile.readline()
if line == "":
break
elif line == "\n":
continue
ind = line.find("msec")
time = float(line[:ind - 1])
ind = line.find(":")
fb_type = line[ind + 2:]
fb_dict[time] = fb_type
rec_dict['feedback_info'] = fb_dict
if line == "":
break
elif "File created" in line:
header = [line]
for counter in range(4):
line = recfile.readline()
header.append(line)
rec_dict['header'] = header
return rec_dict
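# Hedged usage sketch for readrecf; 'bird0001.rec' is a hypothetical file name.
#
#     rec = readrecf('bird0001.rec')
#     print(rec['sample_freq'], rec['num_samples'])
#     for t_msec, fb_type in rec.get('feedback_info', {}).items():
#         print(t_msec, fb_type)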
def load_cbin(filename, channel=0):
"""
loads .cbin files output by EvTAF.
arguments
---------
filename : string
channel : integer
default is 0
returns
-------
data : numpy array
1-d vector of 16-bit signed integers
sample_freq : integer
sampling frequency in Hz. Typically 32000.
"""
# .cbin files are big endian, 16 bit signed int, hence dtype=">i2" below
data = np.fromfile(filename, dtype=">i2")
recfile = filename[:-5] + '.rec'
rec_dict = readrecf(recfile)
data = data[channel::rec_dict['num_channels']] # step by number of channels
sample_freq = rec_dict['sample_freq']
return data, sample_freq
def load_notmat(filename):
"""
loads .not.mat files created by evsonganaly.m.
wrapper around scipy.io.loadmat.
Calls loadmat with squeeze_me=True to remove extra dimensions from arrays
that loadmat parser sometimes adds.
Argument
--------
filename : string, name of .not.mat file
Returns
-------
notmat_dict : dictionary of variables from .not.mat files
"""
if ".not.mat" in filename:
pass
elif filename[-4:] == "cbin":
filename += ".not.mat"
else:
raise ValueError("Filename should have extension .cbin.not.mat or"
" .cbin")
if not os.path.isfile(filename):
raise FileNotFoundError
else:
return loadmat(filename, squeeze_me=True)
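# Hedged sketch of loading audio together with its annotations; the file name is
# hypothetical and assumes matching .rec and .not.mat files sit next to the .cbin.
#
#     data, fs = load_cbin('bird0001.cbin')
#     notmat = load_notmat('bird0001.cbin')   # resolves to 'bird0001.cbin.not.mat'
#     onsets, offsets, labels = notmat['onsets'], notmat['offsets'], notmat['labels']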
def bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs=None):
"""filter song audio with band pass filter, run through filtfilt
(zero-phase filter)
Parameters
----------
rawsong : ndarray
audio
samp_freq : int
sampling frequency
freq_cutoffs : list
2 elements long, cutoff frequencies for bandpass filter
if None, set to [500, 10000]. Default is None.
Returns
-------
filtsong : ndarray
"""
Nyquist_rate = samp_freq / 2
if freq_cutoffs is None:
freq_cutoffs = [500, 10000]
if rawsong.shape[-1] < 387:
numtaps = 64
elif rawsong.shape[-1] < 771:
numtaps = 128
elif rawsong.shape[-1] < 1539:
numtaps = 256
else:
numtaps = 512
cutoffs = np.asarray([freq_cutoffs[0] / Nyquist_rate,
freq_cutoffs[1] / Nyquist_rate])
# code on which this is based, bandpass_filtfilt.m, says it uses Hann(ing)
# window to design filter, but default for matlab's fir1
# is actually Hamming
# note that first parameter for scipy.signal.firwin is filter *length*
# whereas argument to matlab's fir1 is filter *order*
# for linear FIR, filter length is filter order + 1
b = scipy.signal.firwin(numtaps + 1, cutoffs, pass_zero=False)
a = np.zeros((numtaps+1,))
a[0] = 1 # make an "all-zero filter"
padlen = np.max((b.shape[-1] - 1, a.shape[-1] - 1))
filtsong = scipy.signal.filtfilt(b, a, rawsong, padlen=padlen)
return filtsong
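# Minimal sketch of bandpass_filtfilt on synthetic data (values are illustrative only):
#
#     fs = 32000
#     t = np.arange(fs) / fs
#     raw = np.sin(2 * np.pi * 2000 * t) + np.sin(2 * np.pi * 50 * t)
#     filt = bandpass_filtfilt(raw, fs)                       # default 500-10000 Hz pass band
#     filt_narrow = bandpass_filtfilt(raw, fs, freq_cutoffs=[1000, 8000])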
def smooth_data(rawsong, samp_freq, freq_cutoffs=None, smooth_win=2):
"""filter raw audio and smooth signal
used to calculate amplitude.
Parameters
----------
rawsong : 1-d numpy array
"raw" voltage waveform from microphone
samp_freq : int
sampling frequency
freq_cutoffs: list
two-element list of integers, [low freq., high freq.]
bandpass filter applied with this list defining pass band.
Default is None, in which case bandpass filter is not applied.
smooth_win : integer
size of smoothing window in milliseconds. Default is 2.
Returns
-------
smooth : 1-d numpy array
smoothed waveform
Applies a bandpass filter with the frequency cutoffs in spect_params,
then rectifies the signal by squaring, and lastly smooths by taking
the average within a window of size sm_win.
This is a very literal translation from the Matlab function SmoothData.m
by Evren Tumer. Uses the Thomas-Santana algorithm.
"""
if freq_cutoffs is None:
# then don't do bandpass_filtfilt
filtsong = rawsong
else:
filtsong = bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs)
squared_song = np.power(filtsong, 2)
    win_len = np.round(samp_freq * smooth_win / 1000).astype(int)  # window length in samples
    h = np.ones((win_len,)) / win_len
smooth = np.convolve(squared_song, h)
offset = round((smooth.shape[-1] - filtsong.shape[-1]) / 2)
smooth = smooth[offset:filtsong.shape[-1] + offset]
return smooth |
py | b4108096b4c78b68daf106e04fad801d26c0a370 | from ooquery.expression import Expression, InvalidExpressionException
from sql.operators import *
from expects import *
with description('Creating an expression'):
with context('if is an invalid expression'):
with it('must raise an InvalidExpressionException'):
def callback():
Expression(('a',))
expect(callback).to(raise_error(InvalidExpressionException))
    with context('if is a valid expression'):
        with it('should not fail'):
exp = Expression(('a', '=', 'b'))
expect(exp.expression).to(equal(Equal('a', 'b')))
with context('if has an invalid operand'):
with it('must raise an exception'):
def callback():
Expression(('a', '!!=', 'b'))
expect(callback).to(raise_error(
ValueError, 'Operator !!= is not supported'
))
with context('testing if is a valid expression'):
with it('must return true if is a valid expression'):
is_exp = Expression.is_expression(('a', '=', 'b'))
expect(is_exp).to(be_true)
with it('must return false if is an invalid expression'):
is_exp = Expression.is_expression(('a', '='))
expect(is_exp).to(be_false)
|
py | b410813c6c4297c46c6ca2597443a122ba6dda59 | import numpy as np
import pygsti.baseobjs.basisconstructors as bc
from ..util import BaseCase
class BasisConstructorsTester(BaseCase):
def test_GellMann(self):
id2x2 = np.array([[1, 0], [0, 1]])
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1.0j], [1.0j, 0]])
sigmaz = np.array([[1, 0], [0, -1]])
# Gell-Mann 2x2 matrices should just be the sigma matrices
GM2_mxs = bc.gm_matrices_unnormalized(2)
self.assertTrue(len(GM2_mxs) == 4)
self.assertArraysAlmostEqual(GM2_mxs[0], id2x2)
self.assertArraysAlmostEqual(GM2_mxs[1], sigmax)
self.assertArraysAlmostEqual(GM2_mxs[2], sigmay)
self.assertArraysAlmostEqual(GM2_mxs[3], sigmaz)
with self.assertRaises(TypeError):
bc.gm_matrices_unnormalized("FooBar") # arg must be tuple,list,or int
# Normalized Gell-Mann 2x2 matrices should just be the sigma matrices / sqrt(2)
NGM2_mxs = bc.gm_matrices(2)
self.assertTrue(len(NGM2_mxs) == 4)
self.assertArraysAlmostEqual(NGM2_mxs[0], id2x2 / np.sqrt(2))
self.assertArraysAlmostEqual(NGM2_mxs[1], sigmax / np.sqrt(2))
self.assertArraysAlmostEqual(NGM2_mxs[2], sigmay / np.sqrt(2))
self.assertArraysAlmostEqual(NGM2_mxs[3], sigmaz / np.sqrt(2))
#TODO: test 4x4 matrices?
def test_orthogonality(self):
#Gell Mann
dim = 5
mxs = bc.gm_matrices(dim)
N = len(mxs); self.assertTrue(N == dim**2)
gm_trMx = np.zeros((N, N), 'complex')
for i in range(N):
for j in range(N):
gm_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
#Note: conjugate transpose not needed since mxs are Hermitian
self.assertArraysAlmostEqual(gm_trMx, np.identity(N, 'complex'))
#Std Basis
dim = 5
mxs = bc.std_matrices(dim)
N = len(mxs); self.assertTrue(N == dim**2)
std_trMx = np.zeros((N, N), 'complex')
for i in range(N):
for j in range(N):
std_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
self.assertArraysAlmostEqual(std_trMx, np.identity(N, 'complex'))
#Pauli-product basis
dim = 4
mxs = bc.pp_matrices(dim)
N = len(mxs); self.assertTrue(N == dim**2)
with self.assertRaises(TypeError):
bc.pp_matrices("Foobar") # dim must be an int
with self.assertRaises(ValueError):
bc.pp_matrices(3) # dim must be a power of 4
specialCase = bc.pp_matrices(1) # single 1x1 identity mx
self.assertEqual(specialCase, [np.identity(1, 'complex')])
pp_trMx = np.zeros((N, N), 'complex')
for i in range(N):
for j in range(N):
pp_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
#Note: conjugate transpose not needed since mxs are Hermitian
self.assertArraysAlmostEqual(pp_trMx, np.identity(N, 'complex'))
def test_basis_misc(self):
mx = bc.pp_matrices(1) # was [1] but this shouldn't be allowed
self.assertArraysAlmostEqual(np.identity(1, 'complex'), mx)
def test_pp_maxweight(self):
pp2Max1 = bc.pp_matrices(2, max_weight=1) # using max_weight
pp2 = bc.pp_matrices(2) # For 2x2, should match max_weight=1
for mxMax, mx in zip(pp2Max1, pp2):
self.assertArraysAlmostEqual(mxMax, mx)
pp4Max1 = bc.pp_matrices(4, max_weight=1)
pp4 = bc.pp_matrices(4)
pp4Subset = [pp4[0], pp4[1], pp4[2], pp4[3], pp4[4], pp4[8], pp4[12]] # Pull out II,IX,IY,IZ,XI,YI,ZI
for mxMax, mxSub in zip(pp4Max1, pp4Subset):
self.assertArraysAlmostEqual(mxMax, mxSub)
def test_qt_dim1(self):
qutrit1 = bc.qt_matrices(1) # special case when dim==1
self.assertArraysAlmostEqual(np.identity(1, 'd'), qutrit1)
def test_qt_orthonorm(self):
mxs = bc.qt_matrices(3)
for i in range(len(mxs)):
for j in range(len(mxs)):
dp = np.vdot(mxs[i], mxs[j])
if i == j:
self.assertAlmostEqual(dp, 1.0)
else:
self.assertAlmostEqual(dp, 0.0)
|
py | b41081c89c31531a7d2a026d5698adfa890c2aa0 | from typing import List
import os
import tensorflow as tf
import numpy as np
from utils.utils import loadTrainDataAsTFDataSet, loadValDataAsTFDataSet
from tensorflow.keras.metrics import Mean
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)  # use the module name, not the literal string '__name__'
class ModelTrainer:
"""
Note:
        Keeping trainStep and testStep as methods gives each ModelTrainer instance its own
        tf.function-decorated callables. Defining them as module-level functions reused across
        instances raises:
ValueError: Creating variables on a non-first call to a function decorated with tf.function.
"""
def __init__(self, model, loss, metric, optimizer, ckptDir, logDir, multiGPU=True, evalStep=1000):
# Safety checks
if not os.path.exists(ckptDir):
os.makedirs(ckptDir)
if not os.path.exists(logDir):
os.makedirs(logDir)
self.ckpt = tf.train.Checkpoint(step=tf.Variable(0),
psnr=tf.Variable(1.0),
optimizer=optimizer,
model=model)
self.ckptMngr = tf.train.CheckpointManager(checkpoint=self.ckpt,
directory=ckptDir,
max_to_keep=5)
self.loss = loss
self.metric = metric
self.logDir = logDir
self.trainLoss = Mean(name='trainLoss')
self.trainPSNR = Mean(name='trainPSNR')
self.testLoss = Mean(name='testLoss')
self.testPSNR = Mean(name='testPSNR')
self.evalStep = evalStep
self.multiGPU = multiGPU
self.strategy = None
self.restore()
@property
def model(self):
return self.ckpt.model
def restore(self):
if self.ckptMngr.latest_checkpoint:
self.ckpt.restore(self.ckptMngr.latest_checkpoint)
print(f'[ INFO ] Model restored from checkpoint at step {self.ckpt.step.numpy()}.')
def fitTrainData(self,
X: tf.Tensor, y: tf.Tensor,
globalBatchSize: int, epochs: int,
valData: List[np.ma.array],
bufferSize: int = 128, valSteps: int = 64,
saveBestOnly: bool = True, initEpoch: int = 0):
logger.info('[ INFO ] Loading data set to buffer cache...')
trainSet = loadTrainDataAsTFDataSet(X, y[0], y[1], epochs, globalBatchSize, bufferSize)
valSet = loadValDataAsTFDataSet(valData[0], valData[1], valData[2], valSteps, globalBatchSize, bufferSize)
logger.info('[ INFO ] Loading success...')
w = tf.summary.create_file_writer(self.logDir)
dataSetLength = len(X)
totalSteps = tf.cast(dataSetLength/globalBatchSize, tf.int64)
globalStep = tf.cast(self.ckpt.step, tf.int64)
step = globalStep % totalSteps
epoch = initEpoch
logger.info('[ INFO ] Begin training...')
with w.as_default():
for x_batch_train, y_batch_train, y_mask_batch_train in trainSet:
if (totalSteps - step) == 0:
epoch += 1
step = tf.cast(self.ckpt.step, tf.int64) % totalSteps
logger.info(f'[ *************** NEW EPOCH *************** ] Epoch number {epoch}')
# Reset metrics
self.trainLoss.reset_states()
self.trainPSNR.reset_states()
self.testLoss.reset_states()
self.testPSNR.reset_states()
step += 1
globalStep += 1
self.trainStep(x_batch_train, y_batch_train, y_mask_batch_train)
self.ckpt.step.assign_add(1)
t = f"[ EPOCH {epoch}/{epochs} ] - [ STEP {step}/{int(totalSteps)} ] Loss: {self.trainLoss.result():.3f}, cPSNR: {self.trainPSNR.result():.3f}"
logger.info(t)
tf.summary.scalar('Train PSNR', self.trainPSNR.result(), step=globalStep)
tf.summary.scalar('Train loss', self.trainLoss.result(), step=globalStep)
if step != 0 and (step % self.evalStep) == 0:
# Reset states for test
self.testLoss.reset_states()
self.testPSNR.reset_states()
for x_batch_val, y_batch_val, y_mask_batch_val in valSet:
self.testStep(x_batch_val, y_batch_val, y_mask_batch_val)
tf.summary.scalar('Test loss', self.testLoss.result(), step=globalStep)
tf.summary.scalar('Test PSNR', self.testPSNR.result(), step=globalStep)
t = f"[ *************** VAL INFO *************** ] Validation Loss: {self.testLoss.result():.3f}, Validation PSNR: {self.testPSNR.result():.3f}"
logger.info(t)
w.flush()
if saveBestOnly and (self.testPSNR.result() <= self.ckpt.psnr):
continue
logger.info('[ SAVE ] Saving checkpoint...')
                    # update the tracked tf.Variable in place so the best PSNR is saved with the checkpoint
                    self.ckpt.psnr.assign(self.testPSNR.result())
self.ckptMngr.save()
@tf.function
def trainStep(self, patchLR, patchHR, maskHR):
with tf.GradientTape() as tape:
predPatchHR = self.ckpt.model(patchLR, training=True)
# Loss(patchHR: tf.Tensor, maskHR: tf.Tensor, predPatchHR: tf.Tensor)
loss = self.loss(patchHR, maskHR, predPatchHR)
gradients = tape.gradient(loss, self.ckpt.model.trainable_variables)
self.ckpt.optimizer.apply_gradients(zip(gradients, self.ckpt.model.trainable_variables))
metric = self.metric(patchHR, maskHR, predPatchHR)
self.trainLoss(loss)
self.trainPSNR(metric)
@tf.function
def testStep(self, patchLR, patchHR, maskHR):
predPatchHR = self.ckpt.model(patchLR, training=False)
loss = self.loss(patchHR, maskHR, predPatchHR)
metric = self.metric(patchHR, maskHR, predPatchHR)
self.testLoss(loss)
self.testPSNR(metric)
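# Hedged usage sketch; Net, myLoss, cPSNR and the data arrays are placeholders for
# whatever model, loss, metric and dataset the surrounding project defines.
#
#     trainer = ModelTrainer(model=Net(), loss=myLoss, metric=cPSNR,
#                            optimizer=tf.keras.optimizers.Adam(1e-4),
#                            ckptDir='ckpt', logDir='logs')
#     trainer.fitTrainData(X, (y, y_mask), globalBatchSize=32, epochs=100,
#                          valData=[X_val, y_val, y_val_mask])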
|
py | b4108267c5843312d9680217123e78c4da9c3a46 | # :copyright: Copyright (c) 2018-2020. OS4D Ltd - All Rights Reserved
# :license: Commercial
# Unauthorized copying of this file, via any medium is strictly prohibited
# Written by Stefano Apostolico <[email protected]>, October 2020
import json
import logging
import os
from birder.core.check import BaseCheck
from birder.core.queue import send
from birder.exceptions import ValidationError
from .redis import client
logger = logging.getLogger(__name__)
class Registry:
def __init__(self, ctx=os.environ):
self.context = ctx
def initialize(self):
pass
def __len__(self):
return len(self._checks())
def __iter__(self):
checks = self._checks()
if not self.order:
self.sort_by()
return iter([checks[v] for v in self.order if v in checks.keys() and checks[v].enabled])
def disabled(self):
checks = self._checks()
if not self.order:
self.sort_by()
return iter([checks[v] for v in self.order if v in checks.keys() and not checks[v].enabled])
def __getitem__(self, item):
checks = self._checks()
return checks[item]
def _checks(self) -> [BaseCheck]:
from birder.checks import Factory
checks = {}
names = sorted([k for k, v in self.context.items() if k.startswith('MONITOR_')])
for varname in names:
try:
conn = self.context[varname]
check = Factory.from_conn_string(*conn.split('|', 1), system=True)
check.pk = varname
for k, v in self.overriden(varname).items():
setattr(check, k, v)
checks[varname] = check
except Exception as e:
logger.exception(e)
dynamics = self.get_dynamic()
for label, init_string in dynamics.items():
try:
check = Factory.from_conn_string(label.decode(), init_string.decode())
check.pk = check.name
override = client.get(f"override:{check.pk}")
if override:
for k, v in json.loads(override).items():
setattr(check, k, v)
checks[check.pk] = check
except Exception as e:
logger.exception(e)
return checks
@property
def order(self):
return [c.decode() for c in client.lrange("order", 0, client.llen("order"))]
def sort_by(self, sequence=None):
ordered = list(sequence or [])
ordered.extend([c.pk for c in self._checks().values() if c.pk not in ordered])
ordered.reverse()
p = client.pipeline()
p.delete("order")
p.lpush("order", *ordered)
p.execute()
def override(self, hkey, **kwargs):
client.set(f"override:{hkey}", json.dumps(kwargs))
def overriden(self, hkey):
return json.loads(client.get(f"override:{hkey}") or "{}")
def get_dynamic(self):
return client.hgetall('dynamic')
def add_dynamic(self, label, init_string):
data = client.hgetall('dynamic')
if label in data:
            raise ValueError("monitor '{}' already exists".format(label))
data[label.upper()] = init_string
client.hmset('dynamic', data)
self.sort_by()
send(label)
def enable(self, hkey):
kwargs = self.overriden(hkey)
kwargs['enabled'] = True
self.override(hkey, **kwargs)
send(hkey)
def remove(self, hkey):
check = self[hkey]
if check.system:
kwargs = self.overriden(hkey)
kwargs['enabled'] = False
self.override(hkey, **kwargs)
else:
client.hdel('dynamic', hkey.encode())
self.sort_by()
send(hkey)
registry = Registry()
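# Hedged sketch of typical registry interactions; the label and connection string
# below are hypothetical examples, not values shipped with the project.
#
#     registry.add_dynamic('PGSQL_MAIN', 'postgres://db:5432/main')
#     for check in registry:          # iterates enabled checks in stored order
#         print(check.pk, check.enabled)
#     registry.remove('PGSQL_MAIN')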
|
py | b410826c109b15c7fb5617a2783af7ae93195eeb | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import numpy as np
import hashlib
def wrap(func, *args, unsqueeze=False):
"""
Wrap a torch function so it can be called with NumPy arrays.
Input and return types are seamlessly converted.
"""
# Convert input types where applicable
args = list(args)
for i, arg in enumerate(args):
if type(arg) == np.ndarray:
args[i] = torch.from_numpy(arg)
if unsqueeze:
args[i] = args[i].unsqueeze(0)
result = func(*args)
# Convert output types where applicable
if isinstance(result, tuple):
result = list(result)
for i, res in enumerate(result):
if type(res) == torch.Tensor:
if unsqueeze:
res = res.squeeze(0)
result[i] = res.numpy()
return tuple(result)
elif type(result) == torch.Tensor:
if unsqueeze:
result = result.squeeze(0)
return result.numpy()
else:
return result
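# Hedged sketch of wrap(): calling a torch op with NumPy inputs and outputs.
#
#     a = np.random.rand(3, 4).astype('float32')
#     b = np.random.rand(3, 4).astype('float32')
#     c = wrap(torch.add, a, b)   # inputs converted to tensors, result returned as a NumPy array
#     # unsqueeze=True adds a leading batch dimension before the call and strips it
#     # afterwards, for functions that expect batched input.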
def deterministic_random(min_value, max_value, data):
digest = hashlib.sha256(data.encode()).digest()
raw_value = int.from_bytes(digest[:4], byteorder='little', signed=False)
return int(raw_value / (2**32 - 1) * (max_value - min_value)) + min_value |
py | b41082c8c081a2c13f2e625c5d2f2e432db9aa0f | from __future__ import absolute_import
############################################################################
# Copyright (C) 2008 by Volker Christian #
# [email protected] #
# #
# This program is free software; you can redistribute it and#or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.ConfigList import ConfigListScreen
from Components.config import ConfigInteger
from Components.config import ConfigSelection
from Components.config import ConfigSubsection
from Components.config import ConfigSubList
from Components.config import ConfigText
from Components.config import config
from Components.config import getConfigListEntry
from Screens.Screen import Screen
from .YouTubeInterface import YouTubeUser
from . import _
# This should be executed only once during an enigma2-session
config.plugins.youtubeplayer = ConfigSubsection()
config.plugins.youtubeplayer.serverprofile = ConfigText("", False)
#config.plugins.youtubeplayer.quality = ConfigSelection(
# [
# ("", _("Low Quality (Mono)")),
# ("&fmt=6", _("Medium Quality (Mono)")),
# ("&fmt=18", _("High Quality (Stereo)")),
# ("&fmt=22", _("HD Quality (Stereo)"))
# ], "&fmt=18")
config.plugins.youtubeplayer.quality = ConfigSelection(
[
("1", _("Low Quality (Mono)")),
("6", _("Medium Quality (Mono)")),
("18", _("High Quality (Stereo)")),
("22", _("HD Quality (Stereo)"))
], "18")
class __YouTubeUserConfig():
def __init__(self):
self.userlist = []
config.plugins.youtubeplayer.usercount = ConfigInteger(0)
config.plugins.youtubeplayer.users = ConfigSubList()
config.plugins.youtubeplayer.defaultuser = ConfigText("", False)
for usernum in list(range(0, config.plugins.youtubeplayer.usercount.value)):
self.new()
# Add a new server or load a configsection if existing
def new(self):
newUserConfigSubsection = ConfigSubsection()
config.plugins.youtubeplayer.users.append(newUserConfigSubsection)
newUserConfigSubsection.name = ConfigText("User " + str(self.__getUserCount()), False)
if newUserConfigSubsection.name.value == newUserConfigSubsection.name.default:
newUserConfigSubsection.name.default = ""
newUserConfigSubsection.email = ConfigText("", False)
newUserConfigSubsection.password = ConfigText("", False)
newUser = YouTubeUser(newUserConfigSubsection)
self.userlist.append(newUser)
return newUser
# Add was canceled or existing server should be removed
def delete(self, user):
config.plugins.youtubeplayer.users.remove(user.getCfg())
self.userlist.remove(user)
self.__save()
# Edit or Add should complete
def save(self, user):
user.getCfg().save()
self.__save()
# Edit has been canceled
def cancel(self, user):
for element in list(user.getCfg().dict().values()):
element.cancel()
def getUserlist(self):
return self.userlist
def getUserByName(self, name):
for user in self.userlist:
if user.getName() == name:
return user
return None
def getDefaultUser(self):
return self.getUserByName(config.plugins.youtubeplayer.defaultuser.value)
def setAsDefault(self, defaultUser):
if defaultUser is not None:
config.plugins.youtubeplayer.defaultuser.value = defaultUser.getName()
config.plugins.youtubeplayer.defaultuser.save()
def __save(self):
config.plugins.youtubeplayer.usercount.value = self.__getUserCount()
config.plugins.youtubeplayer.usercount.save()
def __getUserCount(self):
return len(config.plugins.youtubeplayer.users)
youTubeUserConfig = __YouTubeUserConfig()
class YouTubeUserConfigScreen(Screen, ConfigListScreen):
def __init__(self, session, user):
Screen.__init__(self, session)
self.user = user
self["actions"] = ActionMap(["YouTubeUserConfigScreenActions"],
{
"save" : self.keySave,
"cancel" : self.keyCancel
}, -2)
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("Save"))
self["key_yellow"] = Button("")
self["key_blue"] = Button("")
cfglist = []
cfglist.append(getConfigListEntry(_("User Profile Name"), user.name()))
cfglist.append(getConfigListEntry(_("E-Mail Address"), user.email()))
cfglist.append(getConfigListEntry(_("Password"), user.password()))
ConfigListScreen.__init__(self, cfglist, session)
def keySave(self):
self.close(True, self.user)
def keyCancel(self):
self.close(False, self.user)
|
py | b41082e7d585a3c9e66d109cbd6aaade57bd7096 | # Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from functools import partial
import nnabla.utils.converter
import numpy as np
def generate_value(type, dims, multiplier):
if type == 'Normal':
ret = np.random.randn(*dims) * multiplier
elif type == 'Uniform':
ret = np.random.uniform(-multiplier, multiplier, size=dims)
elif type == 'Constant':
ret = np.ones(dims) * multiplier
else:
raise ValueError('Generator type "' +
type + '" is not supported.')
return ret.astype(np.float32)
def create_nnabart_info(nnp, batch_size):
class info:
pass
executor = nnabla.utils.converter.select_executor(nnp)
# Search network.
network = nnabla.utils.converter.search_network(
nnp, executor.network_name)
if network is None:
print('Network for executor [{}] is not found.'.format(
executor.network_name))
return
print('Using network [{}].'.format(executor.network_name))
info._batch_size = batch_size
if batch_size < 0:
info._batch_size = network.batch_size
info._network_name = executor.network_name
parameters = collections.OrderedDict()
for p in nnp.protobuf.parameter:
parameters[p.variable_name] = p
variables = collections.OrderedDict()
for v in network.variable:
variables[v.name] = v
info._generator_variables = {}
info._num_of_gen_variables = len(executor.generator_variable)
for v in executor.generator_variable:
v_info = variables[v.variable_name]
shape = [d if d > 0 else info._batch_size for d in v_info.shape.dim]
data = generate_value(v.type, shape, v.multiplier)
info._generator_variables[v.variable_name] = data
info._input_variables = []
info._num_of_inputs = len(executor.data_variable)
info._input_buffer_sizes = []
for n, i in enumerate(executor.data_variable):
info._input_variables.append(i.variable_name)
v = variables[i.variable_name]
info._input_buffer_sizes.append(
nnabla.utils.converter.calc_shape_size(v.shape, info._batch_size))
info._output_variables = []
info._num_of_outputs = len(executor.output_variable)
info._output_buffer_sizes = []
for n, o in enumerate(executor.output_variable):
info._output_variables.append(o.variable_name)
v = variables[o.variable_name]
info._output_buffer_sizes.append(
nnabla.utils.converter.calc_shape_size(v.shape, info._batch_size))
info._param_variables = []
info._num_of_params = len(executor.parameter_variable)
for n, p in enumerate(executor.parameter_variable):
info._param_variables.append(p.variable_name)
# Prepare variable buffers
info._variable_sizes = []
info._variable_buffer_index = collections.OrderedDict()
info._variable_buffer_size = collections.OrderedDict()
info._buffer_ids = {}
buffer_index = 0
for n, v in enumerate(network.variable):
size = nnabla.utils.converter.calc_shape_size(
v.shape, info._batch_size)
info._variable_sizes.append(size)
if v.type == 'Buffer':
info._variable_buffer_index[buffer_index] = [n]
for vid in info._variable_buffer_index[buffer_index]:
info._buffer_ids[vid] = buffer_index
if buffer_index in info._variable_buffer_size:
if size > info._variable_buffer_size[buffer_index]:
info._variable_buffer_size[buffer_index] = size
else:
info._variable_buffer_size[buffer_index] = size
buffer_index += 1
info._parameters = parameters
info._variables = variables
info._network = network
info._function_info = nnabla.utils.converter.get_function_info()
info._convert_context = {}
return info
def revise_buffer_size(info, settings):
'''
    Revise buffer sizes so they are expressed in bytes instead of
    number of data items.
    This is only used for nnb, not for csrc.
    When the settings contain user-customized data types (not pure
    FLOAT32), the per-item byte size affects memory consumption.
'''
size_mapping = {
'FLOAT32': 4,
'FIXED16': 2,
'FIXED8': 1
}
var_dict = settings['variables']
buffer_index = 0
info._variable_sizes = []
info._variable_buffer_index = collections.OrderedDict()
info._variable_buffer_size = collections.OrderedDict()
info._buffer_ids = {}
for n, v in enumerate(info._network.variable):
byte_per_item = size_mapping.get(var_dict.get(
v.name, 'FLOAT32').split('_')[0], 4)
size = nnabla.utils.converter.calc_shape_size(
v.shape, info._batch_size) * byte_per_item
info._variable_sizes.append(size)
if v.type == 'Buffer':
info._variable_buffer_index[buffer_index] = [n]
for vid in info._variable_buffer_index[buffer_index]:
info._buffer_ids[vid] = buffer_index
info._variable_buffer_size[buffer_index] = size
buffer_index += 1
def affine_transpose_weight(params, info, func):
if 'Affine' in info._convert_context:
transposed = info._convert_context['Affine']
else:
transposed = set()
for idx in params:
weight_name = func.input[idx]
if weight_name in transposed:
return
w_shape = info._variables[weight_name].shape.dim[:]
if weight_name in info._parameters:
w_data = info._parameters[weight_name]
transposed.add(weight_name)
info._convert_context['Affine'] = transposed
else:
            print(
                "WARNING: affine weight is not transposed, since it is not included in .nntxt/.nnp")
            continue  # weight data is unavailable, so skip the transpose for this input
i_num = w_shape[0]
data = np.array(w_data.data[:])
data = data.reshape(int(i_num), -1)
data = np.transpose(data)
del info._parameters[weight_name].data[:]
info._parameters[weight_name].data.extend(data.flatten())
def pack_bin_conv_unused_weight(index, info, func):
weight_name = func.input[index]
d = info._parameters[weight_name].data[:]
d = d[0:1] # TRUNC TO 1
del info._parameters[weight_name].data[:]
info._parameters[weight_name].data.extend(d)
NNB_PREPROCESS_LIST = {
'Affine': partial(affine_transpose_weight, [1]),
'BinaryConnectAffine': partial(affine_transpose_weight, [1, 2]),
'BinaryWeightAffine': partial(affine_transpose_weight, [1, 2]),
'BinaryWeightConvolution': partial(pack_bin_conv_unused_weight, 1),
'BinaryConnectConvolution': partial(pack_bin_conv_unused_weight, 1)
}
CSRC_PREPROCESS_LIST = {
'Affine': partial(affine_transpose_weight, [1]),
'BinaryConnectAffine': partial(affine_transpose_weight, [1, 2]),
'BinaryWeightAffine': partial(affine_transpose_weight, [1, 2])
}
PREPROCESS_DICT = {
'CSRC': CSRC_PREPROCESS_LIST,
'NNB': NNB_PREPROCESS_LIST
}
def preprocess_for_exporter(info, exporter_name):
if exporter_name in PREPROCESS_DICT:
preprocess_list = PREPROCESS_DICT[exporter_name]
else:
return
for func in info._network.function:
if func.type in preprocess_list:
preprocessor = preprocess_list[func.type]
if callable(preprocessor):
preprocessor(info, func)
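# Hedged sketch of a typical call path before exporting (variable names are illustrative):
#
#     info = create_nnabart_info(nnp, batch_size=1)
#     preprocess_for_exporter(info, 'NNB')   # transposes Affine weights, packs binary-conv weights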
|
py | b41083ae229f9c38198a7965e13bf1a2586f037b | """
This file offers the methods to automatically retrieve the graph Pelobacter carbinolicus.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PelobacterCarbinolicus(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Pelobacter carbinolicus graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Pelobacter carbinolicus graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PelobacterCarbinolicus",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
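# Hedged usage sketch (data is downloaded on first call unless already cached):
#
#     graph = PelobacterCarbinolicus(directed=False, version="links.v11.5")
#     # `graph` is an ensmallen Graph instance; see the ensmallen documentation for its API.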
|
py | b41083e918d1c724941024ccd2c0a4ba3aba8f13 |
import sys
import subprocess
import mx.DateTime
sts = mx.DateTime.DateTime(2013,6,1)
ets = mx.DateTime.DateTime(2013,6,26)
interval = mx.DateTime.RelativeDateTime(minutes=1440)
now = sts
while now < ets:
print now
cmd = "python summarize.py %s" % (now.strftime("%Y %m %d"),)
subprocess.call(cmd, shell=True)
now += interval
|
py | b41084422d1c0759503d0d4d795bb48e6ab0ac56 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests timesketch utilities."""
from __future__ import unicode_literals
import unittest
from dftimewolf.lib import timesketch_utils
class TimesketchAPIClient(unittest.TestCase):
"""Tests for the Timesketch API client."""
def testInitialization(self):
"""Tests that the processor can be initialized."""
timesketch_url = 'http://localhost'
username = 'test'
password = 'test'
timesketch_client = timesketch_utils.TimesketchApiClient(
host_url=timesketch_url, username=username, password=password)
self.assertIsNotNone(timesketch_client)
if __name__ == '__main__':
unittest.main()
|
py | b410844f816ebb57232e89f8dbc5fd484a9256a8 | from abc import ABCMeta, abstractmethod
import traceback
import sys
import warnings
import typing
import functools
from collections import OrderedDict
from collections.abc import Iterable, MutableMapping, MutableSet, MutableSequence
from enum import Enum
from .. import tracer
from .._utils import *
from .._unused import *
__all__ = [
"Shape", "signed", "unsigned",
"Value", "Const", "C", "AnyConst", "AnySeq", "Operator", "Mux", "Part", "Slice", "Cat", "Repl",
"Array", "ArrayProxy",
"Signal", "ClockSignal", "ResetSignal",
"UserValue", "ValueCastable",
"Sample", "Past", "Stable", "Rose", "Fell", "Initial",
"Statement", "Switch",
"Property", "Assign", "Assert", "Assume", "Cover",
"ValueKey", "ValueDict", "ValueSet", "SignalKey", "SignalDict", "SignalSet",
]
class DUID:
"""Deterministic Unique IDentifier."""
__next_uid = 0
def __init__(self):
self.duid = DUID.__next_uid
DUID.__next_uid += 1
class Shape:
"""Bit width and signedness of a value.
A ``Shape`` can be constructed using:
* explicit bit width and signedness;
* aliases :func:`signed` and :func:`unsigned`;
* casting from a variety of objects.
A ``Shape`` can be cast from:
* an integer, where the integer specifies the bit width;
* a range, where the result is wide enough to represent any element of the range, and is
signed if any element of the range is signed;
* an :class:`Enum` with all integer members or :class:`IntEnum`, where the result is wide
enough to represent any member of the enumeration, and is signed if any member of
the enumeration is signed.
Parameters
----------
width : int
The number of bits in the representation, including the sign bit (if any).
signed : bool
If ``False``, the value is unsigned. If ``True``, the value is signed two's complement.
"""
def __init__(self, width=1, signed=False):
if not isinstance(width, int) or width < 0:
raise TypeError("Width must be a non-negative integer, not {!r}"
.format(width))
self.width = width
self.signed = signed
def __iter__(self):
return iter((self.width, self.signed))
@staticmethod
def cast(obj, *, src_loc_at=0):
if isinstance(obj, Shape):
return obj
if isinstance(obj, int):
return Shape(obj)
if isinstance(obj, tuple):
width, signed = obj
warnings.warn("instead of `{tuple}`, use `{constructor}({width})`"
.format(constructor="signed" if signed else "unsigned", width=width,
tuple=obj),
DeprecationWarning, stacklevel=2 + src_loc_at)
return Shape(width, signed)
if isinstance(obj, range):
if len(obj) == 0:
return Shape(0, obj.start < 0)
signed = obj.start < 0 or (obj.stop - obj.step) < 0
width = max(bits_for(obj.start, signed),
bits_for(obj.stop - obj.step, signed))
return Shape(width, signed)
if isinstance(obj, type) and issubclass(obj, Enum):
min_value = min(member.value for member in obj)
max_value = max(member.value for member in obj)
if not isinstance(min_value, int) or not isinstance(max_value, int):
raise TypeError("Only enumerations with integer values can be used "
"as value shapes")
signed = min_value < 0 or max_value < 0
width = max(bits_for(min_value, signed), bits_for(max_value, signed))
return Shape(width, signed)
raise TypeError("Object {!r} cannot be used as value shape".format(obj))
def __repr__(self):
if self.signed:
return "signed({})".format(self.width)
else:
return "unsigned({})".format(self.width)
def __eq__(self, other):
if isinstance(other, tuple) and len(other) == 2:
width, signed = other
if isinstance(width, int) and isinstance(signed, bool):
return self.width == width and self.signed == signed
else:
raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, "
"not {!r}"
.format(other))
if not isinstance(other, Shape):
raise TypeError("Shapes may be compared with other Shapes and (int, bool) tuples, "
"not {!r}"
.format(other))
return self.width == other.width and self.signed == other.signed
def unsigned(width):
"""Shorthand for ``Shape(width, signed=False)``."""
return Shape(width, signed=False)
def signed(width):
"""Shorthand for ``Shape(width, signed=True)``."""
return Shape(width, signed=True)
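# Hedged examples of Shape casting, following the rules implemented in Shape.cast above:
#
#     Shape.cast(8)              # unsigned(8)
#     Shape.cast(range(0, 10))   # unsigned(4), wide enough for 0..9
#     Shape.cast(range(-8, 8))   # signed(4), wide enough for -8..7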
class Value(metaclass=ABCMeta):
@staticmethod
def cast(obj):
"""Converts ``obj`` to an nMigen value.
Booleans and integers are wrapped into a :class:`Const`. Enumerations whose members are
all integers are converted to a :class:`Const` with a shape that fits every member.
"""
if isinstance(obj, Value):
return obj
if isinstance(obj, int):
return Const(obj)
if isinstance(obj, Enum):
return Const(obj.value, Shape.cast(type(obj)))
if isinstance(obj, ValueCastable):
return obj.as_value()
raise TypeError("Object {!r} cannot be converted to an nMigen value".format(obj))
def __init__(self, *, src_loc_at=0):
super().__init__()
self.src_loc = tracer.get_src_loc(1 + src_loc_at)
def __bool__(self):
raise TypeError("Attempted to convert nMigen value to Python boolean")
def __invert__(self):
return Operator("~", [self])
def __neg__(self):
return Operator("-", [self])
def __add__(self, other):
return Operator("+", [self, other])
def __radd__(self, other):
return Operator("+", [other, self])
def __sub__(self, other):
return Operator("-", [self, other])
def __rsub__(self, other):
return Operator("-", [other, self])
def __mul__(self, other):
return Operator("*", [self, other])
def __rmul__(self, other):
return Operator("*", [other, self])
def __check_divisor(self):
width, signed = self.shape()
if signed:
# Python's division semantics and Verilog's division semantics differ for negative
# divisors (Python uses div/mod, Verilog uses quo/rem); for now, avoid the issue
# completely by prohibiting such division operations.
raise NotImplementedError("Division by a signed value is not supported")
def __mod__(self, other):
other = Value.cast(other)
other.__check_divisor()
return Operator("%", [self, other])
def __rmod__(self, other):
self.__check_divisor()
return Operator("%", [other, self])
def __floordiv__(self, other):
other = Value.cast(other)
other.__check_divisor()
return Operator("//", [self, other])
def __rfloordiv__(self, other):
self.__check_divisor()
return Operator("//", [other, self])
def __check_shamt(self):
width, signed = self.shape()
if signed:
# Neither Python nor HDLs implement shifts by negative values; prohibit any shifts
# by a signed value to make sure the shift amount can always be interpreted as
# an unsigned value.
raise TypeError("Shift amount must be unsigned")
def __lshift__(self, other):
other = Value.cast(other)
other.__check_shamt()
return Operator("<<", [self, other])
def __rlshift__(self, other):
self.__check_shamt()
return Operator("<<", [other, self])
def __rshift__(self, other):
other = Value.cast(other)
other.__check_shamt()
return Operator(">>", [self, other])
def __rrshift__(self, other):
self.__check_shamt()
return Operator(">>", [other, self])
def __and__(self, other):
return Operator("&", [self, other])
def __rand__(self, other):
return Operator("&", [other, self])
def __xor__(self, other):
return Operator("^", [self, other])
def __rxor__(self, other):
return Operator("^", [other, self])
def __or__(self, other):
return Operator("|", [self, other])
def __ror__(self, other):
return Operator("|", [other, self])
def __eq__(self, other):
return Operator("==", [self, other])
def __ne__(self, other):
return Operator("!=", [self, other])
def __lt__(self, other):
return Operator("<", [self, other])
def __le__(self, other):
return Operator("<=", [self, other])
def __gt__(self, other):
return Operator(">", [self, other])
def __ge__(self, other):
return Operator(">=", [self, other])
def __abs__(self):
width, signed = self.shape()
if signed:
return Mux(self >= 0, self, -self)
else:
return self
def __len__(self):
return self.shape().width
def __getitem__(self, key):
n = len(self)
if isinstance(key, int):
if key not in range(-n, n):
raise IndexError(f"Index {key} is out of bounds for a {n}-bit value")
if key < 0:
key += n
return Slice(self, key, key + 1)
elif isinstance(key, slice):
start, stop, step = key.indices(n)
if step != 1:
return Cat(self[i] for i in range(start, stop, step))
return Slice(self, start, stop)
else:
raise TypeError("Cannot index value with {}".format(repr(key)))
def as_unsigned(self):
"""Conversion to unsigned.
Returns
-------
Value, out
This ``Value`` reinterpreted as a unsigned integer.
"""
return Operator("u", [self])
def as_signed(self):
"""Conversion to signed.
Returns
-------
Value, out
This ``Value`` reinterpreted as a signed integer.
"""
return Operator("s", [self])
def bool(self):
"""Conversion to boolean.
Returns
-------
Value, out
``1`` if any bits are set, ``0`` otherwise.
"""
return Operator("b", [self])
def any(self):
"""Check if any bits are ``1``.
Returns
-------
Value, out
``1`` if any bits are set, ``0`` otherwise.
"""
return Operator("r|", [self])
def all(self):
"""Check if all bits are ``1``.
Returns
-------
Value, out
``1`` if all bits are set, ``0`` otherwise.
"""
return Operator("r&", [self])
def xor(self):
"""Compute pairwise exclusive-or of every bit.
Returns
-------
Value, out
``1`` if an odd number of bits are set, ``0`` if an even number of bits are set.
"""
return Operator("r^", [self])
def implies(premise, conclusion):
"""Implication.
Returns
-------
Value, out
``0`` if ``premise`` is true and ``conclusion`` is not, ``1`` otherwise.
"""
return ~premise | conclusion
def bit_select(self, offset, width):
"""Part-select with bit granularity.
Selects a constant width but variable offset part of a ``Value``, such that successive
parts overlap by all but 1 bit.
Parameters
----------
offset : Value, int
Index of first selected bit.
width : int
Number of selected bits.
Returns
-------
Part, out
Selected part of the ``Value``
"""
offset = Value.cast(offset)
if type(offset) is Const and isinstance(width, int):
return self[offset.value:offset.value + width]
return Part(self, offset, width, stride=1, src_loc_at=1)
def word_select(self, offset, width):
"""Part-select with word granularity.
Selects a constant width but variable offset part of a ``Value``, such that successive
parts do not overlap.
Parameters
----------
offset : Value, int
Index of first selected word.
width : int
Number of selected bits.
Returns
-------
Part, out
Selected part of the ``Value``
"""
offset = Value.cast(offset)
if type(offset) is Const and isinstance(width, int):
return self[offset.value * width:(offset.value + 1) * width]
return Part(self, offset, width, stride=width, src_loc_at=1)
def matches(self, *patterns):
"""Pattern matching.
Matches against a set of patterns, which may be integers or bit strings, recognizing
the same grammar as ``Case()``.
Parameters
----------
patterns : int or str
Patterns to match against.
Returns
-------
Value, out
``1`` if any pattern matches the value, ``0`` otherwise.
"""
matches = []
for pattern in patterns:
if not isinstance(pattern, (int, str, Enum)):
raise SyntaxError("Match pattern must be an integer, a string, or an enumeration, "
"not {!r}"
.format(pattern))
if isinstance(pattern, str) and any(bit not in "01- \t" for bit in pattern):
raise SyntaxError("Match pattern '{}' must consist of 0, 1, and - (don't care) "
"bits, and may include whitespace"
.format(pattern))
if (isinstance(pattern, str) and
len("".join(pattern.split())) != len(self)):
raise SyntaxError("Match pattern '{}' must have the same width as match value "
"(which is {})"
.format(pattern, len(self)))
if isinstance(pattern, int) and bits_for(pattern) > len(self):
warnings.warn("Match pattern '{:b}' is wider than match value "
"(which has width {}); comparison will never be true"
.format(pattern, len(self)),
SyntaxWarning, stacklevel=3)
continue
if isinstance(pattern, str):
pattern = "".join(pattern.split()) # remove whitespace
mask = int(pattern.replace("0", "1").replace("-", "0"), 2)
pattern = int(pattern.replace("-", "0"), 2)
matches.append((self & mask) == pattern)
elif isinstance(pattern, int):
matches.append(self == pattern)
elif isinstance(pattern, Enum):
matches.append(self == pattern.value)
else:
assert False
if not matches:
return Const(0)
elif len(matches) == 1:
return matches[0]
else:
return Cat(*matches).any()
def shift_left(self, amount):
"""Shift left by constant amount.
Parameters
----------
amount : int
Amount to shift by.
Returns
-------
Value, out
If the amount is positive, the input shifted left. Otherwise, the input shifted right.
"""
if not isinstance(amount, int):
raise TypeError("Shift amount must be an integer, not {!r}".format(amount))
if amount < 0:
return self.shift_right(-amount)
if self.shape().signed:
return Cat(Const(0, amount), self).as_signed()
else:
return Cat(Const(0, amount), self) # unsigned
def shift_right(self, amount):
"""Shift right by constant amount.
Parameters
----------
amount : int
Amount to shift by.
Returns
-------
Value, out
If the amount is positive, the input shifted right. Otherwise, the input shifted left.
"""
if not isinstance(amount, int):
raise TypeError("Shift amount must be an integer, not {!r}".format(amount))
if amount < 0:
return self.shift_left(-amount)
if self.shape().signed:
return self[amount:].as_signed()
else:
return self[amount:] # unsigned
def rotate_left(self, amount):
"""Rotate left by constant amount.
Parameters
----------
amount : int
Amount to rotate by.
Returns
-------
Value, out
If the amount is positive, the input rotated left. Otherwise, the input rotated right.
"""
if not isinstance(amount, int):
raise TypeError("Rotate amount must be an integer, not {!r}".format(amount))
amount %= len(self)
return Cat(self[-amount:], self[:-amount]) # meow :3
def rotate_right(self, amount):
"""Rotate right by constant amount.
Parameters
----------
amount : int
Amount to rotate by.
Returns
-------
Value, out
If the amount is positive, the input rotated right. Otherwise, the input rotated right.
"""
if not isinstance(amount, int):
raise TypeError("Rotate amount must be an integer, not {!r}".format(amount))
amount %= len(self)
return Cat(self[amount:], self[:amount])
def eq(self, value):
"""Assignment.
Parameters
----------
value : Value, in
Value to be assigned.
Returns
-------
Assign
Assignment statement that can be used in combinatorial or synchronous context.
"""
return Assign(self, value, src_loc_at=1)
@abstractmethod
def shape(self):
"""Bit width and signedness of a value.
Returns
-------
Shape
See :class:`Shape`.
Examples
--------
>>> Signal(8).shape()
Shape(width=8, signed=False)
>>> Const(0xaa).shape()
Shape(width=8, signed=False)
"""
pass # :nocov:
def _lhs_signals(self):
raise TypeError("Value {!r} cannot be used in assignments".format(self))
@abstractmethod
def _rhs_signals(self):
pass # :nocov:
def _as_const(self):
raise TypeError("Value {!r} cannot be evaluated as constant".format(self))
__hash__ = None
@final
class Const(Value):
"""A constant, literal integer value.
Parameters
----------
value : int
shape : int or tuple or None
Either an integer ``width`` or a tuple ``(width, signed)`` specifying the number of bits
in this constant and whether it is signed (can represent negative values).
``shape`` defaults to the minimum possible width and signedness of ``value``.
Attributes
----------
width : int
signed : bool
"""
src_loc = None
@staticmethod
def normalize(value, shape):
width, signed = shape
mask = (1 << width) - 1
value &= mask
if signed and value >> (width - 1):
value |= ~mask
return value
def __init__(self, value, shape=None, *, src_loc_at=0):
# We deliberately do not call Value.__init__ here.
self.value = int(value)
if shape is None:
shape = Shape(bits_for(self.value), signed=self.value < 0)
elif isinstance(shape, int):
shape = Shape(shape, signed=self.value < 0)
else:
shape = Shape.cast(shape, src_loc_at=1 + src_loc_at)
self.width, self.signed = shape
self.value = self.normalize(self.value, shape)
def shape(self):
return Shape(self.width, self.signed)
def _rhs_signals(self):
return SignalSet()
def _as_const(self):
return self.value
def __repr__(self):
return "(const {}'{}d{})".format(self.width, "s" if self.signed else "", self.value)
C = Const # shorthand
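# Hedged examples of Const normalization, following Const.normalize above:
#
#     Const(10)        # (const 4'd10)  -- width inferred from the value
#     Const(-3)        # (const 3'sd-3) -- negative values infer a signed shape
#     C(0x1234, 8)     # (const 8'd52)  -- value truncated to the requested width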
class AnyValue(Value, DUID):
def __init__(self, shape, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at)
if not isinstance(self.width, int) or self.width < 0:
raise TypeError("Width must be a non-negative integer, not {!r}"
.format(self.width))
def shape(self):
return Shape(self.width, self.signed)
def _rhs_signals(self):
return SignalSet()
@final
class AnyConst(AnyValue):
def __repr__(self):
return "(anyconst {}'{})".format(self.width, "s" if self.signed else "")
@final
class AnySeq(AnyValue):
def __repr__(self):
return "(anyseq {}'{})".format(self.width, "s" if self.signed else "")
@final
class Operator(Value):
def __init__(self, operator, operands, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.operator = operator
self.operands = [Value.cast(op) for op in operands]
def shape(self):
def _bitwise_binary_shape(a_shape, b_shape):
a_bits, a_sign = a_shape
b_bits, b_sign = b_shape
if not a_sign and not b_sign:
# both operands unsigned
return Shape(max(a_bits, b_bits), False)
elif a_sign and b_sign:
# both operands signed
return Shape(max(a_bits, b_bits), True)
elif not a_sign and b_sign:
# first operand unsigned (add sign bit), second operand signed
return Shape(max(a_bits + 1, b_bits), True)
else:
# first signed, second operand unsigned (add sign bit)
return Shape(max(a_bits, b_bits + 1), True)
op_shapes = list(map(lambda x: x.shape(), self.operands))
if len(op_shapes) == 1:
(a_width, a_signed), = op_shapes
if self.operator in ("+", "~"):
return Shape(a_width, a_signed)
if self.operator == "-":
return Shape(a_width + 1, True)
if self.operator in ("b", "r|", "r&", "r^"):
return Shape(1, False)
if self.operator == "u":
return Shape(a_width, False)
if self.operator == "s":
return Shape(a_width, True)
elif len(op_shapes) == 2:
(a_width, a_signed), (b_width, b_signed) = op_shapes
if self.operator in ("+", "-"):
width, signed = _bitwise_binary_shape(*op_shapes)
return Shape(width + 1, signed)
if self.operator == "*":
return Shape(a_width + b_width, a_signed or b_signed)
if self.operator in ("//", "%"):
assert not b_signed
return Shape(a_width, a_signed)
if self.operator in ("<", "<=", "==", "!=", ">", ">="):
return Shape(1, False)
if self.operator in ("&", "^", "|"):
return _bitwise_binary_shape(*op_shapes)
if self.operator == "<<":
assert not b_signed
return Shape(a_width + 2 ** b_width - 1, a_signed)
if self.operator == ">>":
assert not b_signed
return Shape(a_width, a_signed)
elif len(op_shapes) == 3:
if self.operator == "m":
s_shape, a_shape, b_shape = op_shapes
return _bitwise_binary_shape(a_shape, b_shape)
raise NotImplementedError("Operator {}/{} not implemented"
.format(self.operator, len(op_shapes))) # :nocov:
def _rhs_signals(self):
return union(op._rhs_signals() for op in self.operands)
def __repr__(self):
return "({} {})".format(self.operator, " ".join(map(repr, self.operands)))
def Mux(sel, val1, val0):
"""Choose between two values.
Parameters
----------
sel : Value, in
Selector.
val1 : Value, in
val0 : Value, in
Input values.
Returns
-------
Value, out
Output ``Value``. If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``.
"""
sel = Value.cast(sel)
if len(sel) != 1:
sel = sel.bool()
return Operator("m", [sel, val1, val0])
@final
class Slice(Value):
def __init__(self, value, start, stop, *, src_loc_at=0):
if not isinstance(start, int):
raise TypeError("Slice start must be an integer, not {!r}".format(start))
if not isinstance(stop, int):
raise TypeError("Slice stop must be an integer, not {!r}".format(stop))
n = len(value)
if start not in range(-(n+1), n+1):
raise IndexError("Cannot start slice {} bits into {}-bit value".format(start, n))
if start < 0:
start += n
if stop not in range(-(n+1), n+1):
raise IndexError("Cannot stop slice {} bits into {}-bit value".format(stop, n))
if stop < 0:
stop += n
if start > stop:
raise IndexError("Slice start {} must be less than slice stop {}".format(start, stop))
super().__init__(src_loc_at=src_loc_at)
self.value = Value.cast(value)
self.start = start
self.stop = stop
def shape(self):
return Shape(self.stop - self.start)
def _lhs_signals(self):
return self.value._lhs_signals()
def _rhs_signals(self):
return self.value._rhs_signals()
def __repr__(self):
return "(slice {} {}:{})".format(repr(self.value), self.start, self.stop)
@final
class Part(Value):
def __init__(self, value, offset, width, stride=1, *, src_loc_at=0):
if not isinstance(width, int) or width < 0:
raise TypeError("Part width must be a non-negative integer, not {!r}".format(width))
if not isinstance(stride, int) or stride <= 0:
raise TypeError("Part stride must be a positive integer, not {!r}".format(stride))
super().__init__(src_loc_at=src_loc_at)
self.value = value
self.offset = Value.cast(offset)
self.width = width
self.stride = stride
def shape(self):
return Shape(self.width)
def _lhs_signals(self):
return self.value._lhs_signals()
def _rhs_signals(self):
return self.value._rhs_signals() | self.offset._rhs_signals()
def __repr__(self):
return "(part {} {} {} {})".format(repr(self.value), repr(self.offset),
self.width, self.stride)
@final
class Cat(Value):
"""Concatenate values.
Form a compound ``Value`` from several smaller ones by concatenation.
The first argument occupies the lower bits of the result.
The return value can be used on either side of an assignment, that
is, the concatenated value can be used as an argument on the RHS or
as a target on the LHS. If it is used on the LHS, it must solely
consist of ``Signal`` s, slices of ``Signal`` s, and other concatenations
meeting these properties. The bit length of the return value is the sum of
the bit lengths of the arguments::
len(Cat(args)) == sum(len(arg) for arg in args)
Parameters
----------
*args : Values or iterables of Values, inout
``Value`` s to be concatenated.
Returns
-------
Value, inout
Resulting ``Value`` obtained by concatentation.
"""
def __init__(self, *args, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.parts = [Value.cast(v) for v in flatten(args)]
def shape(self):
return Shape(sum(len(part) for part in self.parts))
def _lhs_signals(self):
return union((part._lhs_signals() for part in self.parts), start=SignalSet())
def _rhs_signals(self):
return union((part._rhs_signals() for part in self.parts), start=SignalSet())
def _as_const(self):
value = 0
for part in reversed(self.parts):
value <<= len(part)
value |= part._as_const()
return value
def __repr__(self):
return "(cat {})".format(" ".join(map(repr, self.parts)))
@final
class Repl(Value):
"""Replicate a value
An input value is replicated (repeated) several times
to be used on the RHS of assignments::
len(Repl(s, n)) == len(s) * n
Parameters
----------
value : Value, in
Input value to be replicated.
count : int
Number of replications.
Returns
-------
Repl, out
Replicated value.
"""
def __init__(self, value, count, *, src_loc_at=0):
if not isinstance(count, int) or count < 0:
raise TypeError("Replication count must be a non-negative integer, not {!r}"
.format(count))
super().__init__(src_loc_at=src_loc_at)
self.value = Value.cast(value)
self.count = count
def shape(self):
return Shape(len(self.value) * self.count)
def _rhs_signals(self):
return self.value._rhs_signals()
def __repr__(self):
return "(repl {!r} {})".format(self.value, self.count)
# @final
class Signal(Value, DUID):
"""A varying integer value.
Parameters
----------
shape : ``Shape``-castable object or None
Specification for the number of bits in this ``Signal`` and its signedness (whether it
can represent negative values). See ``Shape.cast`` for details.
If not specified, ``shape`` defaults to 1-bit and non-signed.
name : str
Name hint for this signal. If ``None`` (default) the name is inferred from the variable
name this ``Signal`` is assigned to.
reset : int or integral Enum
Reset (synchronous) or default (combinatorial) value.
When this ``Signal`` is assigned to in synchronous context and the corresponding clock
domain is reset, the ``Signal`` assumes the given value. When this ``Signal`` is unassigned
in combinatorial context (due to conditional assignments not being taken), the ``Signal``
assumes its ``reset`` value. Defaults to 0.
reset_less : bool
If ``True``, do not generate reset logic for this ``Signal`` in synchronous statements.
The ``reset`` value is only used as a combinatorial default or as the initial value.
Defaults to ``False``.
attrs : dict
Dictionary of synthesis attributes.
decoder : function or Enum
A function converting integer signal values to human-readable strings (e.g. FSM state
names). If an ``Enum`` subclass is passed, it is concisely decoded using format string
``"{0.name:}/{0.value:}"``, or a number if the signal value is not a member of
the enumeration.
Attributes
----------
width : int
signed : bool
name : str
reset : int
reset_less : bool
attrs : dict
decoder : function
"""
def __init__(self, shape=None, *, name=None, reset=0, reset_less=False,
attrs=None, decoder=None, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
self.name = name or tracer.get_var_name(depth=2 + src_loc_at, default="$signal")
if shape is None:
shape = unsigned(1)
self.width, self.signed = Shape.cast(shape, src_loc_at=1 + src_loc_at)
if isinstance(reset, Enum):
reset = reset.value
if not isinstance(reset, int):
raise TypeError("Reset value has to be an int or an integral Enum")
reset_width = bits_for(reset, self.signed)
if reset != 0 and reset_width > self.width:
warnings.warn("Reset value {!r} requires {} bits to represent, but the signal "
"only has {} bits"
.format(reset, reset_width, self.width),
SyntaxWarning, stacklevel=2 + src_loc_at)
self.reset = reset
self.reset_less = bool(reset_less)
self.attrs = OrderedDict(() if attrs is None else attrs)
if decoder is None and isinstance(shape, type) and issubclass(shape, Enum):
decoder = shape
if isinstance(decoder, type) and issubclass(decoder, Enum):
def enum_decoder(value):
try:
return "{0.name:}/{0.value:}".format(decoder(value))
except ValueError:
return str(value)
self.decoder = enum_decoder
self._enum_class = decoder
else:
self.decoder = decoder
self._enum_class = None
# Not a @classmethod because nmigen.compat requires it.
@staticmethod
def like(other, *, name=None, name_suffix=None, src_loc_at=0, **kwargs):
"""Create Signal based on another.
Parameters
----------
other : Value
Object to base this Signal on.
"""
if name is not None:
new_name = str(name)
elif name_suffix is not None:
new_name = other.name + str(name_suffix)
else:
new_name = tracer.get_var_name(depth=2 + src_loc_at, default="$like")
kw = dict(shape=Value.cast(other).shape(), name=new_name)
if isinstance(other, Signal):
kw.update(reset=other.reset, reset_less=other.reset_less,
attrs=other.attrs, decoder=other.decoder)
kw.update(kwargs)
return Signal(**kw, src_loc_at=1 + src_loc_at)
def shape(self):
return Shape(self.width, self.signed)
def _lhs_signals(self):
return SignalSet((self,))
def _rhs_signals(self):
return SignalSet((self,))
def __repr__(self):
return "(sig {})".format(self.name)
@final
class ClockSignal(Value):
"""Clock signal for a clock domain.
Any ``ClockSignal`` is equivalent to ``cd.clk`` for a clock domain with the corresponding name.
All of these signals ultimately refer to the same signal, but they can be manipulated
independently of the clock domain, even before the clock domain is created.
Parameters
----------
domain : str
Clock domain to obtain a clock signal for. Defaults to ``"sync"``.
"""
def __init__(self, domain="sync", *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
if not isinstance(domain, str):
raise TypeError("Clock domain name must be a string, not {!r}".format(domain))
if domain == "comb":
raise ValueError("Domain '{}' does not have a clock".format(domain))
self.domain = domain
def shape(self):
return Shape(1)
def _lhs_signals(self):
return SignalSet((self,))
def _rhs_signals(self):
raise NotImplementedError("ClockSignal must be lowered to a concrete signal") # :nocov:
def __repr__(self):
return "(clk {})".format(self.domain)
@final
class ResetSignal(Value):
"""Reset signal for a clock domain.
Any ``ResetSignal`` is equivalent to ``cd.rst`` for a clock domain with the corresponding name.
All of these signals ultimately refer to the same signal, but they can be manipulated
independently of the clock domain, even before the clock domain is created.
Parameters
----------
domain : str
Clock domain to obtain a reset signal for. Defaults to ``"sync"``.
allow_reset_less : bool
If the clock domain is reset-less, act as a constant ``0`` instead of reporting an error.
"""
def __init__(self, domain="sync", allow_reset_less=False, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
if not isinstance(domain, str):
raise TypeError("Clock domain name must be a string, not {!r}".format(domain))
if domain == "comb":
raise ValueError("Domain '{}' does not have a reset".format(domain))
self.domain = domain
self.allow_reset_less = allow_reset_less
def shape(self):
return Shape(1)
def _lhs_signals(self):
return SignalSet((self,))
def _rhs_signals(self):
raise NotImplementedError("ResetSignal must be lowered to a concrete signal") # :nocov:
def __repr__(self):
return "(rst {})".format(self.domain)
class Array(MutableSequence):
"""Addressable multiplexer.
An array is similar to a ``list`` that can also be indexed by ``Value``s; indexing by an integer or a slice works the same as for Python lists, but indexing by a ``Value`` results
in a proxy.
The array proxy can be used as an ordinary ``Value``, i.e. participate in calculations and
assignments, provided that all elements of the array are values. The array proxy also supports
attribute access and further indexing, each returning another array proxy; this means that
the results of indexing into arrays, arrays of records, and arrays of arrays can all
be used as first-class values.
It is an error to change an array or any of its elements after an array proxy was created.
Changing the array directly will raise an exception. However, it is not possible to detect
the elements being modified; if an element's attribute or element is modified after the proxy
for it has been created, the proxy will refer to stale data.
Examples
--------
Simple array::
gpios = Array(Signal() for _ in range(10))
with m.If(bus.we):
m.d.sync += gpios[bus.addr].eq(bus.w_data)
with m.Else():
m.d.sync += bus.r_data.eq(gpios[bus.addr])
Multidimensional array::
mult = Array(Array(x * y for y in range(10)) for x in range(10))
a = Signal.range(10)
b = Signal.range(10)
r = Signal(8)
m.d.comb += r.eq(mult[a][b])
Array of records::
layout = [
("r_data", 16),
("r_en", 1),
]
buses = Array(Record(layout) for busno in range(4))
master = Record(layout)
m.d.comb += [
buses[sel].r_en.eq(master.r_en),
master.r_data.eq(buses[sel].r_data),
]
"""
def __init__(self, iterable=()):
self._inner = list(iterable)
self._proxy_at = None
self._mutable = True
def __getitem__(self, index):
if isinstance(index, Value):
if self._mutable:
self._proxy_at = tracer.get_src_loc()
self._mutable = False
return ArrayProxy(self, index)
else:
return self._inner[index]
def __len__(self):
return len(self._inner)
def _check_mutability(self):
if not self._mutable:
raise ValueError("Array can no longer be mutated after it was indexed with a value "
"at {}:{}".format(*self._proxy_at))
def __setitem__(self, index, value):
self._check_mutability()
self._inner[index] = value
def __delitem__(self, index):
self._check_mutability()
del self._inner[index]
def insert(self, index, value):
self._check_mutability()
self._inner.insert(index, value)
def __repr__(self):
return "(array{} [{}])".format(" mutable" if self._mutable else "",
", ".join(map(repr, self._inner)))
@final
class ArrayProxy(Value):
def __init__(self, elems, index, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.elems = elems
self.index = Value.cast(index)
def __getattr__(self, attr):
return ArrayProxy([getattr(elem, attr) for elem in self.elems], self.index)
def __getitem__(self, index):
        return ArrayProxy([elem[index] for elem in self.elems], self.index)
def _iter_as_values(self):
return (Value.cast(elem) for elem in self.elems)
def shape(self):
unsigned_width = signed_width = 0
has_unsigned = has_signed = False
for elem_width, elem_signed in (elem.shape() for elem in self._iter_as_values()):
if elem_signed:
has_signed = True
signed_width = max(signed_width, elem_width)
else:
has_unsigned = True
unsigned_width = max(unsigned_width, elem_width)
# The shape of the proxy must be such that it preserves the mathematical value of the array
# elements. I.e., shape-wise, an array proxy must be identical to an equivalent mux tree.
# To ensure this holds, if the array contains both signed and unsigned values, make sure
# that every unsigned value is zero-extended by at least one bit.
if has_signed and has_unsigned and unsigned_width >= signed_width:
# Array contains both signed and unsigned values, and at least one of the unsigned
# values won't be zero-extended otherwise.
return signed(unsigned_width + 1)
else:
# Array contains values of the same signedness, or else all of the unsigned values
# are zero-extended.
return Shape(max(unsigned_width, signed_width), has_signed)
def _lhs_signals(self):
signals = union((elem._lhs_signals() for elem in self._iter_as_values()),
start=SignalSet())
return signals
def _rhs_signals(self):
signals = union((elem._rhs_signals() for elem in self._iter_as_values()),
start=SignalSet())
return self.index._rhs_signals() | signals
def __repr__(self):
return "(proxy (array [{}]) {!r})".format(", ".join(map(repr, self.elems)), self.index)
# TODO(nmigen-0.4): remove
class UserValue(Value):
"""Value with custom lowering.
A ``UserValue`` is a value whose precise representation does not have to be immediately known,
which is useful in certain metaprogramming scenarios. Instead of providing fixed semantics
upfront, it is kept abstract for as long as possible, only being lowered to a concrete nMigen
value when required.
Note that the ``lower`` method will only be called once; this is necessary to ensure that
nMigen's view of representation of all values stays internally consistent. If the class
deriving from ``UserValue`` is mutable, then it must ensure that after ``lower`` is called,
it is not mutated in a way that changes its representation.
    The following is an incomplete list of actions that, when applied to a ``UserValue`` directly
or indirectly, will cause it to be lowered, provided as an illustrative reference:
* Querying the shape using ``.shape()`` or ``len()``;
* Creating a similarly shaped signal using ``Signal.like``;
* Indexing or iterating through individual bits;
* Adding an assignment to the value to a ``Module`` using ``m.d.<domain> +=``.
"""
@deprecated("instead of `UserValue`, use `ValueCastable`", stacklevel=3)
def __init__(self, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.__lowered = None
@abstractmethod
def lower(self):
"""Conversion to a concrete representation."""
pass # :nocov:
def _lazy_lower(self):
if self.__lowered is None:
lowered = self.lower()
if isinstance(lowered, UserValue):
lowered = lowered._lazy_lower()
self.__lowered = Value.cast(lowered)
return self.__lowered
def shape(self):
return self._lazy_lower().shape()
def _lhs_signals(self):
return self._lazy_lower()._lhs_signals()
def _rhs_signals(self):
return self._lazy_lower()._rhs_signals()
class ValueCastable:
"""Base class for classes which can be cast to Values.
A ``ValueCastable`` can be cast to ``Value``, meaning its precise representation does not have
to be immediately known. This is useful in certain metaprogramming scenarios. Instead of
providing fixed semantics upfront, it is kept abstract for as long as possible, only being
cast to a concrete nMigen value when required.
Note that it is necessary to ensure that nMigen's view of representation of all values stays
internally consistent. The class deriving from ``ValueCastable`` must decorate the ``as_value``
method with the ``lowermethod`` decorator, which ensures that all calls to ``as_value`` return
the same ``Value`` representation. If the class deriving from ``ValueCastable`` is mutable,
it is up to the user to ensure that it is not mutated in a way that changes its representation
after the first call to ``as_value``.
"""
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
if not hasattr(self, "as_value"):
raise TypeError(f"Class '{cls.__name__}' deriving from `ValueCastable` must override "
"the `as_value` method")
if not hasattr(self.as_value, "_ValueCastable__memoized"):
raise TypeError(f"Class '{cls.__name__}' deriving from `ValueCastable` must decorate "
"the `as_value` method with the `ValueCastable.lowermethod` decorator")
return self
@staticmethod
def lowermethod(func):
"""Decorator to memoize lowering methods.
Ensures the decorated method is called only once, with subsequent method calls returning the
        object returned by the first method call.
This decorator is required to decorate the ``as_value`` method of ``ValueCastable`` subclasses.
This is to ensure that nMigen's view of representation of all values stays internally
consistent.
"""
@functools.wraps(func)
def wrapper_memoized(self, *args, **kwargs):
if not hasattr(self, "_ValueCastable__lowered_to"):
self.__lowered_to = func(self, *args, **kwargs)
return self.__lowered_to
wrapper_memoized.__memoized = True
return wrapper_memoized
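# Illustrative usage sketch (not part of the original source; the Point class is
# hypothetical). A ValueCastable subclass must override as_value and decorate it with
# ValueCastable.lowermethod so that repeated casts return the same Value:
#
#     class Point(ValueCastable):
#         def __init__(self, x, y):
#             self.x = x
#             self.y = y
#         @ValueCastable.lowermethod
#         def as_value(self):
#             return Cat(self.x, self.y)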
@final
class Sample(Value):
"""Value from the past.
A ``Sample`` of an expression is equal to the value of the expression ``clocks`` clock edges
of the ``domain`` clock back. If that moment is before the beginning of time, it is equal
to the value of the expression calculated as if each signal had its reset value.
"""
def __init__(self, expr, clocks, domain, *, src_loc_at=0):
super().__init__(src_loc_at=1 + src_loc_at)
self.value = Value.cast(expr)
self.clocks = int(clocks)
self.domain = domain
if not isinstance(self.value, (Const, Signal, ClockSignal, ResetSignal, Initial)):
raise TypeError("Sampled value must be a signal or a constant, not {!r}"
.format(self.value))
if self.clocks < 0:
raise ValueError("Cannot sample a value {} cycles in the future"
.format(-self.clocks))
if not (self.domain is None or isinstance(self.domain, str)):
raise TypeError("Domain name must be a string or None, not {!r}"
.format(self.domain))
def shape(self):
return self.value.shape()
def _rhs_signals(self):
return SignalSet((self,))
def __repr__(self):
return "(sample {!r} @ {}[{}])".format(
self.value, "<default>" if self.domain is None else self.domain, self.clocks)
def Past(expr, clocks=1, domain=None):
return Sample(expr, clocks, domain)
def Stable(expr, clocks=0, domain=None):
return Sample(expr, clocks + 1, domain) == Sample(expr, clocks, domain)
def Rose(expr, clocks=0, domain=None):
return ~Sample(expr, clocks + 1, domain) & Sample(expr, clocks, domain)
def Fell(expr, clocks=0, domain=None):
return Sample(expr, clocks + 1, domain) & ~Sample(expr, clocks, domain)
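# Illustrative usage sketch (not part of the original source; `m`, `count`, `cfg` and
# `busy` are hypothetical names). These helpers are typically used in formal properties:
#
#     m.d.comb += Assert(count == Past(count) + 1)   # count increments every cycle
#     m.d.comb += Assume(Stable(cfg))                # cfg never changes
#     m.d.comb += Cover(Fell(busy))                  # reachable: busy deasserts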
@final
class Initial(Value):
"""Start indicator, for model checking.
An ``Initial`` signal is ``1`` at the first cycle of model checking, and ``0`` at any other.
"""
def __init__(self, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
def shape(self):
return Shape(1)
def _rhs_signals(self):
return SignalSet((self,))
def __repr__(self):
return "(initial)"
class _StatementList(list):
def __repr__(self):
return "({})".format(" ".join(map(repr, self)))
class Statement:
def __init__(self, *, src_loc_at=0):
self.src_loc = tracer.get_src_loc(1 + src_loc_at)
@staticmethod
def cast(obj):
if isinstance(obj, Iterable):
return _StatementList(sum((Statement.cast(e) for e in obj), []))
else:
if isinstance(obj, Statement):
return _StatementList([obj])
else:
raise TypeError("Object {!r} is not an nMigen statement".format(obj))
@final
class Assign(Statement):
def __init__(self, lhs, rhs, *, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.lhs = Value.cast(lhs)
self.rhs = Value.cast(rhs)
def _lhs_signals(self):
return self.lhs._lhs_signals()
def _rhs_signals(self):
return self.lhs._rhs_signals() | self.rhs._rhs_signals()
def __repr__(self):
return "(eq {!r} {!r})".format(self.lhs, self.rhs)
class UnusedProperty(UnusedMustUse):
pass
class Property(Statement, MustUse):
_MustUse__warning = UnusedProperty
def __init__(self, test, *, _check=None, _en=None, src_loc_at=0):
super().__init__(src_loc_at=src_loc_at)
self.test = Value.cast(test)
self._check = _check
self._en = _en
if self._check is None:
self._check = Signal(reset_less=True, name="${}$check".format(self._kind))
self._check.src_loc = self.src_loc
if _en is None:
self._en = Signal(reset_less=True, name="${}$en".format(self._kind))
self._en.src_loc = self.src_loc
def _lhs_signals(self):
return SignalSet((self._en, self._check))
def _rhs_signals(self):
return self.test._rhs_signals()
def __repr__(self):
return "({} {!r})".format(self._kind, self.test)
@final
class Assert(Property):
_kind = "assert"
@final
class Assume(Property):
_kind = "assume"
@final
class Cover(Property):
_kind = "cover"
# @final
class Switch(Statement):
def __init__(self, test, cases, *, src_loc=None, src_loc_at=0, case_src_locs={}):
if src_loc is None:
super().__init__(src_loc_at=src_loc_at)
else:
# Switch is a bit special in terms of location tracking because it is usually created
# long after the control has left the statement that directly caused its creation.
self.src_loc = src_loc
# Switch is also a bit special in that its parts also have location information. It can't
# be automatically traced, so whatever constructs a Switch may optionally provide it.
self.case_src_locs = {}
self.test = Value.cast(test)
self.cases = OrderedDict()
for orig_keys, stmts in cases.items():
# Map: None -> (); key -> (key,); (key...) -> (key...)
keys = orig_keys
if keys is None:
keys = ()
if not isinstance(keys, tuple):
keys = (keys,)
# Map: 2 -> "0010"; "0010" -> "0010"
new_keys = ()
key_mask = (1 << len(self.test)) - 1
for key in keys:
if isinstance(key, str):
key = "".join(key.split()) # remove whitespace
elif isinstance(key, int):
key = format(key & key_mask, "b").rjust(len(self.test), "0")
elif isinstance(key, Enum):
key = format(key.value & key_mask, "b").rjust(len(self.test), "0")
else:
raise TypeError("Object {!r} cannot be used as a switch key"
.format(key))
assert len(key) == len(self.test)
new_keys = (*new_keys, key)
if not isinstance(stmts, Iterable):
stmts = [stmts]
self.cases[new_keys] = Statement.cast(stmts)
if orig_keys in case_src_locs:
self.case_src_locs[new_keys] = case_src_locs[orig_keys]
def _lhs_signals(self):
signals = union((s._lhs_signals() for ss in self.cases.values() for s in ss),
start=SignalSet())
return signals
def _rhs_signals(self):
signals = union((s._rhs_signals() for ss in self.cases.values() for s in ss),
start=SignalSet())
return self.test._rhs_signals() | signals
def __repr__(self):
def case_repr(keys, stmts):
stmts_repr = " ".join(map(repr, stmts))
if keys == ():
return "(default {})".format(stmts_repr)
elif len(keys) == 1:
return "(case {} {})".format(keys[0], stmts_repr)
else:
return "(case ({}) {})".format(" ".join(keys), stmts_repr)
case_reprs = [case_repr(keys, stmts) for keys, stmts in self.cases.items()]
return "(switch {!r} {})".format(self.test, " ".join(case_reprs))
class _MappedKeyCollection(metaclass=ABCMeta):
@abstractmethod
def _map_key(self, key):
pass # :nocov:
@abstractmethod
def _unmap_key(self, key):
pass # :nocov:
class _MappedKeyDict(MutableMapping, _MappedKeyCollection):
def __init__(self, pairs=()):
self._storage = OrderedDict()
for key, value in pairs:
self[key] = value
def __getitem__(self, key):
key = None if key is None else self._map_key(key)
return self._storage[key]
def __setitem__(self, key, value):
key = None if key is None else self._map_key(key)
self._storage[key] = value
def __delitem__(self, key):
key = None if key is None else self._map_key(key)
del self._storage[key]
def __iter__(self):
for key in self._storage:
if key is None:
yield None
else:
yield self._unmap_key(key)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
if len(self) != len(other):
return False
for ak, bk in zip(sorted(self._storage), sorted(other._storage)):
if ak != bk:
return False
if self._storage[ak] != other._storage[bk]:
return False
return True
def __len__(self):
return len(self._storage)
def __repr__(self):
pairs = ["({!r}, {!r})".format(k, v) for k, v in self.items()]
return "{}.{}([{}])".format(type(self).__module__, type(self).__name__,
", ".join(pairs))
class _MappedKeySet(MutableSet, _MappedKeyCollection):
def __init__(self, elements=()):
self._storage = OrderedDict()
for elem in elements:
self.add(elem)
def add(self, value):
self._storage[self._map_key(value)] = None
def update(self, values):
for value in values:
self.add(value)
def discard(self, value):
if value in self:
del self._storage[self._map_key(value)]
def __contains__(self, value):
return self._map_key(value) in self._storage
def __iter__(self):
for key in [k for k in self._storage]:
yield self._unmap_key(key)
def __len__(self):
return len(self._storage)
def __repr__(self):
return "{}.{}({})".format(type(self).__module__, type(self).__name__,
", ".join(repr(x) for x in self))
class ValueKey:
def __init__(self, value):
self.value = Value.cast(value)
if isinstance(self.value, Const):
self._hash = hash(self.value.value)
elif isinstance(self.value, (Signal, AnyValue)):
self._hash = hash(self.value.duid)
elif isinstance(self.value, (ClockSignal, ResetSignal)):
self._hash = hash(self.value.domain)
elif isinstance(self.value, Operator):
self._hash = hash((self.value.operator,
tuple(ValueKey(o) for o in self.value.operands)))
elif isinstance(self.value, Slice):
self._hash = hash((ValueKey(self.value.value), self.value.start, self.value.stop))
elif isinstance(self.value, Part):
self._hash = hash((ValueKey(self.value.value), ValueKey(self.value.offset),
self.value.width, self.value.stride))
elif isinstance(self.value, Cat):
self._hash = hash(tuple(ValueKey(o) for o in self.value.parts))
elif isinstance(self.value, ArrayProxy):
self._hash = hash((ValueKey(self.value.index),
tuple(ValueKey(e) for e in self.value._iter_as_values())))
elif isinstance(self.value, Sample):
self._hash = hash((ValueKey(self.value.value), self.value.clocks, self.value.domain))
elif isinstance(self.value, Initial):
self._hash = 0
else: # :nocov:
raise TypeError("Object {!r} cannot be used as a key in value collections"
.format(self.value))
def __hash__(self):
return self._hash
def __eq__(self, other):
if type(other) is not ValueKey:
return False
if type(self.value) is not type(other.value):
return False
if isinstance(self.value, Const):
return self.value.value == other.value.value
elif isinstance(self.value, (Signal, AnyValue)):
return self.value is other.value
elif isinstance(self.value, (ClockSignal, ResetSignal)):
return self.value.domain == other.value.domain
elif isinstance(self.value, Operator):
return (self.value.operator == other.value.operator and
len(self.value.operands) == len(other.value.operands) and
all(ValueKey(a) == ValueKey(b)
for a, b in zip(self.value.operands, other.value.operands)))
elif isinstance(self.value, Slice):
return (ValueKey(self.value.value) == ValueKey(other.value.value) and
self.value.start == other.value.start and
self.value.stop == other.value.stop)
elif isinstance(self.value, Part):
return (ValueKey(self.value.value) == ValueKey(other.value.value) and
ValueKey(self.value.offset) == ValueKey(other.value.offset) and
self.value.width == other.value.width and
self.value.stride == other.value.stride)
elif isinstance(self.value, Cat):
return all(ValueKey(a) == ValueKey(b)
for a, b in zip(self.value.parts, other.value.parts))
elif isinstance(self.value, ArrayProxy):
return (ValueKey(self.value.index) == ValueKey(other.value.index) and
len(self.value.elems) == len(other.value.elems) and
all(ValueKey(a) == ValueKey(b)
for a, b in zip(self.value._iter_as_values(),
other.value._iter_as_values())))
elif isinstance(self.value, Sample):
return (ValueKey(self.value.value) == ValueKey(other.value.value) and
self.value.clocks == other.value.clocks and
                    self.value.domain == other.value.domain)
elif isinstance(self.value, Initial):
return True
else: # :nocov:
raise TypeError("Object {!r} cannot be used as a key in value collections"
.format(self.value))
def __lt__(self, other):
if not isinstance(other, ValueKey):
return False
if type(self.value) != type(other.value):
return False
if isinstance(self.value, Const):
            return self.value.value < other.value.value
elif isinstance(self.value, (Signal, AnyValue)):
return self.value.duid < other.value.duid
elif isinstance(self.value, Slice):
return (ValueKey(self.value.value) < ValueKey(other.value.value) and
self.value.start < other.value.start and
                    self.value.stop < other.value.stop)
else: # :nocov:
raise TypeError("Object {!r} cannot be used as a key in value collections")
def __repr__(self):
return "<{}.ValueKey {!r}>".format(__name__, self.value)
class ValueDict(_MappedKeyDict):
_map_key = ValueKey
_unmap_key = lambda self, key: key.value
class ValueSet(_MappedKeySet):
_map_key = ValueKey
_unmap_key = lambda self, key: key.value
class SignalKey:
def __init__(self, signal):
self.signal = signal
if isinstance(signal, Signal):
self._intern = (0, signal.duid)
elif type(signal) is ClockSignal:
self._intern = (1, signal.domain)
elif type(signal) is ResetSignal:
self._intern = (2, signal.domain)
else:
raise TypeError("Object {!r} is not an nMigen signal".format(signal))
def __hash__(self):
return hash(self._intern)
def __eq__(self, other):
if type(other) is not SignalKey:
return False
return self._intern == other._intern
def __lt__(self, other):
if type(other) is not SignalKey:
raise TypeError("Object {!r} cannot be compared to a SignalKey".format(signal))
return self._intern < other._intern
def __repr__(self):
return "<{}.SignalKey {!r}>".format(__name__, self.signal)
class SignalDict(_MappedKeyDict):
_map_key = SignalKey
_unmap_key = lambda self, key: key.signal
class SignalSet(_MappedKeySet):
_map_key = SignalKey
_unmap_key = lambda self, key: key.signal
|
py | b41084d61a5c962badb8510953650aba4cf8b713 | #!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from apricot import TestWithServers
from command_utils_base import ObjectWithParameters, BasicParameter
from test_utils_pool import TestPool
from test_utils_container import TestContainer
class RebuildTestParams(ObjectWithParameters):
"""Class for gathering test parameters."""
def __init__(self):
"""Initialize a RebuildTestParams object."""
super(RebuildTestParams, self).__init__("/run/rebuild/*")
self.object_class = BasicParameter(None)
self.rank = BasicParameter(None)
class RebuildTestBase(TestWithServers):
"""Base rebuild test class.
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a RebuildTestBase object."""
super(RebuildTestBase, self).__init__(*args, **kwargs)
self.inputs = RebuildTestParams()
self.targets = None
self.server_count = 0
self.info_checks = None
self.rebuild_checks = None
def setUp(self):
"""Set up each test case."""
# Start the servers and agents
super(RebuildTestBase, self).setUp()
# Get the test parameters
self.inputs.get_params(self)
# Get the number of targets per server for pool info calculations
self.targets = self.params.get("targets", "/run/server_config/*")
self.server_count = len(self.hostlist_servers)
def setup_test_pool(self):
"""Define a TestPool object."""
self.pool = TestPool(self.context, dmg_command=self.get_dmg_command())
self.pool.get_params(self)
def setup_test_container(self):
"""Define a TestContainer object."""
self.container = TestContainer(self.pool)
self.container.get_params(self)
def setup_pool_verify(self):
"""Set up pool verification initial expected values."""
self.info_checks = {
"pi_uuid": self.pool.uuid,
"pi_nnodes": self.server_count,
"pi_ntargets": (self.server_count * self.targets),
"pi_ndisabled": 0,
}
self.rebuild_checks = {
"rs_done": 1,
"rs_obj_nr": 0,
"rs_rec_nr": 0,
"rs_errno": 0,
}
def update_pool_verify(self):
"""Update the pool verification expected values."""
self.info_checks["pi_ndisabled"] = ">0"
self.rebuild_checks["rs_obj_nr"] = ">0"
self.rebuild_checks["rs_rec_nr"] = ">0"
def execute_pool_verify(self, msg=None):
"""Verify the pool info.
Args:
msg (str, optional): additional information to include in the error
message. Defaults to None.
"""
status = self.pool.check_pool_info(**self.info_checks)
status &= self.pool.check_rebuild_status(**self.rebuild_checks)
self.assertTrue(
status,
"Error confirming pool info{}".format("" if msg is None else msg))
def create_test_pool(self):
"""Create the pool and verify its info."""
# Create a pool
self.pool.create()
# Verify the pool information before rebuild
self.setup_pool_verify()
self.execute_pool_verify(" before rebuild")
def create_test_container(self):
"""Create a container and write objects."""
if self.container is not None:
self.container.create()
self.container.write_objects(
self.inputs.rank.value, self.inputs.object_class.value)
def verify_rank_has_objects(self):
"""Verify the rank to be excluded has at least one object."""
if self.container is not None:
rank = self.inputs.rank.value
rank_list = self.container.get_target_rank_lists(" before rebuild")
qty = self.container.get_target_rank_count(rank, rank_list)
self.assertGreater(
qty, 0, "No objects written to rank {}".format(rank))
def verify_rank_has_no_objects(self):
"""Verify the excluded rank has zero objects."""
if self.container is not None:
rank = self.inputs.rank.value
rank_list = self.container.get_target_rank_lists(" after rebuild")
qty = self.container.get_target_rank_count(rank, rank_list)
self.assertEqual(
qty, 0, "Excluded rank {} still has objects".format(rank))
def start_rebuild(self):
"""Start the rebuild process."""
# Exclude the rank from the pool to initiate rebuild
if isinstance(self.inputs.rank.value, list):
self.pool.start_rebuild(self.inputs.rank.value, self.d_log)
else:
self.pool.start_rebuild([self.inputs.rank.value], self.d_log)
# Wait for rebuild to start
self.pool.wait_for_rebuild(True, 1)
def execute_during_rebuild(self):
"""Execute test steps during rebuild."""
pass
def verify_container_data(self, txn=None):
"""Verify the container data.
Args:
txn (int, optional): transaction timestamp to read. Defaults to None
which uses the last timestamp written.
"""
if self.container is not None:
self.assertTrue(
self.container.read_objects(txn),
"Error verifying contianer data")
def execute_rebuild_test(self, create_container=True):
"""Execute the rebuild test steps.
Args:
create_container (bool, optional): should the test create a
container. Defaults to True.
"""
# Get the test params
self.setup_test_pool()
if create_container:
self.setup_test_container()
# Create a pool and verify the pool information before rebuild
self.create_test_pool()
# Create a container and write objects
self.create_test_container()
# Verify the rank to be excluded has at least one object
self.verify_rank_has_objects()
# Start the rebuild process
self.start_rebuild()
# Execute the test steps during rebuild
self.execute_during_rebuild()
# Confirm rebuild completes
self.pool.wait_for_rebuild(False, 1)
# Verify the excluded rank is no longer used with the objects
self.verify_rank_has_no_objects()
# Verify the pool information after rebuild
self.update_pool_verify()
self.execute_pool_verify(" after rebuild")
# Verify the container data can still be accessed
self.verify_container_data()
self.log.info("Test passed")
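# Illustrative usage sketch (not part of the original source; the subclass name and
# avocado tags are hypothetical). A concrete test derives from RebuildTestBase and
# calls execute_rebuild_test() to drive the pool/container/rebuild steps defined above:
#
#     class RebuildBasic(RebuildTestBase):
#         """Basic rebuild test.
#         :avocado: recursive
#         """
#         def test_rebuild_basic(self):
#             """:avocado: tags=all,rebuild"""
#             self.execute_rebuild_test()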
|
py | b41084d69e012b46f94481015eb80d45b558ab02 | '''Label
=====
.. image:: images/label.png
:align: right
The :class:`Label` widget is for rendering text. It supports ascii and unicode
strings::
# hello world text
l = Label(text='Hello world')
# unicode text; can only display glyphs that are available in the font
l = Label(text=u'Hello world ' + unichr(2764))
# multiline text
l = Label(text='Multi\\nLine')
# size
l = Label(text='Hello world', font_size='20sp')
.. _kivy-uix-label-sizing-and-text-content:
Sizing and text content
---------------------------
By default, the size of :class:`Label` is not affected by :attr:`~Label.text`
content and the text is not affected by the size. In order to control
sizing, you must specify :attr:`~Label.text_size` to constrain the text
and/or bind :attr:`~Label.size` to :attr:`~Label.texture_size` to grow with
the text.
For example, this label's size will be set to the text content
(plus :attr:`~Label.padding`):
.. code-block:: kv
Label:
size: self.texture_size
This label's text will wrap at the specified width and be clipped to the height:
.. code-block:: kv
Label:
text_size: cm(6), cm(4)
.. note:: The :attr:`~Label.shorten` and :attr:`~Label.max_lines` attributes
control how overflowing text behaves.
Combine these concepts to create a Label that can grow vertically but wraps the
text at a certain width:
.. code-block:: kv
Label:
text_size: root.width, None
size: self.texture_size
Text alignment and wrapping
---------------------------
The :class:`Label` has :attr:`~Label.halign` and :attr:`~Label.valign`
properties to control the alignment of its text. However, by default the text
image (:attr:`~Label.texture`) is only just large enough to contain the
characters and is positioned in the center of the Label. The valign property
will have no effect and halign will only have an effect if your text has
newlines; a single line of text will appear to be centered even though halign is
set to left (by default).
In order for the alignment properties to take effect, set the
:attr:`~Label.text_size`, which specifies the size of the bounding box within
which text is aligned. For instance, the following code binds this size to the
size of the Label, so text will be aligned within the widget bounds. This
will also automatically wrap the text of the Label to remain within this area.
.. code-block:: kv
Label:
text_size: self.size
halign: 'right'
valign: 'middle'
Markup text
-----------
.. versionadded:: 1.1.0
You can change the style of the text using :doc:`api-kivy.core.text.markup`.
The syntax is similar to the bbcode syntax but only the inline styling is
allowed::
# hello world with world in bold
l = Label(text='Hello [b]World[/b]', markup=True)
# hello in red, world in blue
l = Label(text='[color=ff3333]Hello[/color][color=3333ff]World[/color]',
markup = True)
If you need to escape the markup from the current text, use
:func:`kivy.utils.escape_markup`::
text = 'This is an important message [1]'
l = Label(text='[b]' + escape_markup(text) + '[/b]', markup=True)
The following tags are available:
``[b][/b]``
Activate bold text
``[i][/i]``
Activate italic text
``[u][/u]``
Underlined text
``[s][/s]``
Strikethrough text
``[font=<str>][/font]``
Change the font
``[size=<integer>][/size]``
Change the font size
``[color=#<color>][/color]``
Change the text color
``[ref=<str>][/ref]``
Add an interactive zone. The reference + bounding box inside the
reference will be available in :attr:`Label.refs`
``[anchor=<str>]``
Put an anchor in the text. You can get the position of your anchor within
the text with :attr:`Label.anchors`
``[sub][/sub]``
Display the text at a subscript position relative to the text before it.
``[sup][/sup]``
Display the text at a superscript position relative to the text before it.
If you want to render the markup text with a [ or ] or & character, you need to
escape them. We created a simple syntax::
[ -> &bl;
] -> &br;
    & -> &amp;
Then you can write::
"[size=24]Hello &bl;World&bt;[/size]"
Interactive zone in text
------------------------
.. versionadded:: 1.1.0
You can now have definable "links" using text markup. The idea is to be able
to detect when the user clicks on part of the text and to react.
The tag ``[ref=xxx]`` is used for that.
In this example, we are creating a reference on the word "World". When
this word is clicked, the function ``print_it`` will be called with the
name of the reference::
def print_it(instance, value):
print('User clicked on', value)
widget = Label(text='Hello [ref=world]World[/ref]', markup=True)
widget.bind(on_ref_press=print_it)
For prettier rendering, you could add a color for the reference. Replace the
``text=`` in the previous example with::
'Hello [ref=world][color=0000ff]World[/color][/ref]'
Catering for Unicode languages
------------------------------
The font kivy uses does not contain all the characters required for displaying
all languages. When you use the built-in widgets, this results in a block being
drawn where you expect a character.
If you want to display such characters, you can choose a font that supports them
and deploy it universally via kv:
.. code-block:: kv
<Label>:
font_name: '/<path>/<to>/<font>'
Note that this needs to be done before your widgets are loaded as kv rules are
only applied at load time.
Usage example
-------------
The following example marks the anchors and references contained in a label::
from kivy.app import App
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.graphics import Color, Rectangle
class TestApp(App):
@staticmethod
def get_x(label, ref_x):
""" Return the x value of the ref/anchor relative to the canvas """
return label.center_x - label.texture_size[0] * 0.5 + ref_x
@staticmethod
def get_y(label, ref_y):
""" Return the y value of the ref/anchor relative to the canvas """
# Note the inversion of direction, as y values start at the top of
# the texture and increase downwards
return label.center_y + label.texture_size[1] * 0.5 - ref_y
def show_marks(self, label):
# Indicate the position of the anchors with a red top marker
for name, anc in label.anchors.items():
with label.canvas:
Color(1, 0, 0)
Rectangle(pos=(self.get_x(label, anc[0]),
self.get_y(label, anc[1])),
size=(3, 3))
# Draw a green surround around the refs. Note the sizes y inversion
for name, boxes in label.refs.items():
for box in boxes:
with label.canvas:
Color(0, 1, 0, 0.25)
Rectangle(pos=(self.get_x(label, box[0]),
self.get_y(label, box[1])),
size=(box[2] - box[0],
box[1] - box[3]))
def build(self):
label = Label(
text='[anchor=a]a\\nChars [anchor=b]b\\n[ref=myref]ref[/ref]',
markup=True)
Clock.schedule_once(lambda dt: self.show_marks(label), 1)
return label
TestApp().run()
'''
__all__ = ('Label', )
from kivy.clock import Clock
from kivy.uix.widget import Widget
from kivy.core.text import Label as CoreLabel
from kivy.core.text.markup import MarkupLabel as CoreMarkupLabel
from kivy.properties import StringProperty, OptionProperty, \
NumericProperty, BooleanProperty, ReferenceListProperty, \
ListProperty, ObjectProperty, DictProperty
from kivy.utils import get_hex_from_color
class Label(Widget):
'''Label class, see module documentation for more information.
:Events:
`on_ref_press`
Fired when the user clicks on a word referenced with a
``[ref]`` tag in a text markup.
'''
__events__ = ['on_ref_press']
_font_properties = ('text', 'font_size', 'font_name', 'bold', 'italic',
'underline', 'strikethrough', 'color', 'disabled_color',
'halign', 'valign', 'padding_x', 'padding_y',
'outline_width', 'disabled_outline_color',
'outline_color', 'text_size', 'shorten', 'mipmap',
'line_height', 'max_lines', 'strip', 'shorten_from',
'split_str', 'unicode_errors', 'markup',
'font_hinting', 'font_kerning', 'font_blended')
def __init__(self, **kwargs):
self._trigger_texture = Clock.create_trigger(self.texture_update, -1)
super(Label, self).__init__(**kwargs)
# bind all the property for recreating the texture
d = Label._font_properties
fbind = self.fbind
update = self._trigger_texture_update
fbind('disabled', update, 'disabled')
for x in d:
fbind(x, update, x)
self._label = None
self._create_label()
# force the texture creation
self._trigger_texture()
def _create_label(self):
# create the core label class according to markup value
if self._label is not None:
cls = self._label.__class__
else:
cls = None
markup = self.markup
if (markup and cls is not CoreMarkupLabel) or \
(not markup and cls is not CoreLabel):
# markup have change, we need to change our rendering method.
d = Label._font_properties
dkw = dict(list(zip(d, [getattr(self, x) for x in d])))
if markup:
self._label = CoreMarkupLabel(**dkw)
else:
self._label = CoreLabel(**dkw)
def _trigger_texture_update(self, name=None, source=None, value=None):
# check if the label core class need to be switch to a new one
if name == 'markup':
self._create_label()
if source:
if name == 'text':
self._label.text = value
elif name == 'text_size':
self._label.usersize = value
elif name == 'font_size':
self._label.options[name] = value
elif name == 'disabled_color' and self.disabled:
self._label.options['color'] = value
elif name == 'disabled_outline_color' and self.disabled:
self._label.options['outline_color'] = value
elif name == 'disabled':
self._label.options['color'] = self.disabled_color if value \
else self.color
self._label.options['outline_color'] = (
self.disabled_outline_color if value else
self.outline_color)
else:
self._label.options[name] = value
self._trigger_texture()
def texture_update(self, *largs):
'''Force texture recreation with the current Label properties.
After this function call, the :attr:`texture` and :attr:`texture_size`
will be updated in this order.
'''
mrkup = self._label.__class__ is CoreMarkupLabel
self.texture = None
if (not self._label.text or (self.halign == 'justify' or self.strip)
and not self._label.text.strip()):
self.texture_size = (0, 0)
if mrkup:
self.refs, self._label._refs = {}, {}
self.anchors, self._label._anchors = {}, {}
else:
if mrkup:
text = self.text
# we must strip here, otherwise, if the last line is empty,
# markup will retain the last empty line since it only strips
# line by line within markup
if self.halign == 'justify' or self.strip:
text = text.strip()
self._label.text = ''.join(('[color=',
get_hex_from_color(
self.disabled_color if
self.disabled else self.color),
']', text, '[/color]'))
self._label.refresh()
# force the rendering to get the references
if self._label.texture:
self._label.texture.bind()
self.refs = self._label.refs
self.anchors = self._label.anchors
else:
self._label.refresh()
texture = self._label.texture
if texture is not None:
self.texture = self._label.texture
self.texture_size = list(self.texture.size)
def on_touch_down(self, touch):
if super(Label, self).on_touch_down(touch):
return True
if not len(self.refs):
return False
tx, ty = touch.pos
tx -= self.center_x - self.texture_size[0] / 2.
ty -= self.center_y - self.texture_size[1] / 2.
ty = self.texture_size[1] - ty
for uid, zones in self.refs.items():
for zone in zones:
x, y, w, h = zone
if x <= tx <= w and y <= ty <= h:
self.dispatch('on_ref_press', uid)
return True
return False
def on_ref_press(self, ref):
pass
#
# Properties
#
disabled_color = ListProperty([1, 1, 1, .3])
'''The color of the text when the widget is disabled, in the (r, g, b, a)
format.
.. versionadded:: 1.8.0
:attr:`disabled_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, .3].
'''
text = StringProperty('')
'''Text of the label.
Creation of a simple hello world::
widget = Label(text='Hello world')
If you want to create the widget with an unicode string, use::
widget = Label(text=u'My unicode string')
:attr:`text` is a :class:`~kivy.properties.StringProperty` and defaults to
''.
'''
text_size = ListProperty([None, None])
'''By default, the label is not constrained to any bounding box.
You can set the size constraint of the label with this property.
The text will autoflow into the constraints. So although the font size
will not be reduced, the text will be arranged to fit into the box as best
as possible, with any text still outside the box clipped.
This sets and clips :attr:`texture_size` to text_size if not None.
.. versionadded:: 1.0.4
For example, whatever your current widget size is, if you want the label to
be created in a box with width=200 and unlimited height::
Label(text='Very big big line', text_size=(200, None))
.. note::
This text_size property is the same as the
:attr:`~kivy.core.text.Label.usersize` property in the
:class:`~kivy.core.text.Label` class. (It is named size= in the
constructor.)
:attr:`text_size` is a :class:`~kivy.properties.ListProperty` and
defaults to (None, None), meaning no size restriction by default.
'''
font_name = StringProperty('Roboto')
'''Filename of the font to use. The path can be absolute or relative.
Relative paths are resolved by the :func:`~kivy.resources.resource_find`
function.
.. warning::
Depending of your text provider, the font file can be ignored. However,
you can mostly use this without problems.
If the font used lacks the glyphs for the particular language/symbols
you are using, you will see '[]' blank box characters instead of the
actual glyphs. The solution is to use a font that has the glyphs you
need to display. For example, to display |unicodechar|, use a font such
as freesans.ttf that has the glyph.
.. |unicodechar| image:: images/unicode-char.png
:attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
defaults to 'Roboto'.
'''
font_size = NumericProperty('15sp')
'''Font size of the text, in pixels.
:attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to 15sp.
'''
line_height = NumericProperty(1.0)
'''Line Height for the text. e.g. line_height = 2 will cause the spacing
between lines to be twice the size.
:attr:`line_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.0.
.. versionadded:: 1.5.0
'''
bold = BooleanProperty(False)
'''Indicates use of the bold version of your font.
.. note::
Depending of your font, the bold attribute may have no impact on your
text rendering.
:attr:`bold` is a :class:`~kivy.properties.BooleanProperty` and defaults to
False.
'''
italic = BooleanProperty(False)
'''Indicates use of the italic version of your font.
.. note::
Depending of your font, the italic attribute may have no impact on your
text rendering.
:attr:`italic` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
underline = BooleanProperty(False)
'''Adds an underline to the text.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`underline` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
strikethrough = BooleanProperty(False)
'''Adds a strikethrough line to the text.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`strikethrough` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
padding_x = NumericProperty(0)
'''Horizontal padding of the text inside the widget box.
:attr:`padding_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
.. versionchanged:: 1.9.0
`padding_x` has been fixed to work as expected.
In the past, the text was padded by the negative of its values.
'''
padding_y = NumericProperty(0)
'''Vertical padding of the text inside the widget box.
:attr:`padding_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
.. versionchanged:: 1.9.0
`padding_y` has been fixed to work as expected.
In the past, the text was padded by the negative of its values.
'''
padding = ReferenceListProperty(padding_x, padding_y)
'''Padding of the text in the format (padding_x, padding_y)
:attr:`padding` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`padding_x`, :attr:`padding_y`) properties.
'''
halign = OptionProperty('left', options=['left', 'center', 'right',
'justify'])
'''Horizontal alignment of the text.
:attr:`halign` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'left'. Available options are : left, center, right and
justify.
.. warning::
This doesn't change the position of the text texture of the Label
(centered), only the position of the text in this texture. You probably
want to bind the size of the Label to the :attr:`texture_size` or set a
:attr:`text_size`.
.. versionchanged:: 1.6.0
A new option was added to :attr:`halign`, namely `justify`.
'''
valign = OptionProperty('bottom',
options=['bottom', 'middle', 'center', 'top'])
'''Vertical alignment of the text.
:attr:`valign` is an :class:`~kivy.properties.OptionProperty` and defaults
to 'bottom'. Available options are : `'bottom'`,
`'middle'` (or `'center'`) and `'top'`.
.. versionchanged:: 1.9.2
The `'center'` option has been added as an alias of `'middle'`.
.. warning::
This doesn't change the position of the text texture of the Label
(centered), only the position of the text within this texture. You
probably want to bind the size of the Label to the :attr:`texture_size`
or set a :attr:`text_size` to change this behavior.
'''
color = ListProperty([1, 1, 1, 1])
'''Text color, in the format (r, g, b, a).
:attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
[1, 1, 1, 1].
'''
outline_width = NumericProperty(None, allownone=True)
'''Width in pixels for the outline around the text. No outline will be
rendered if the value is None.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`outline_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
'''
outline_color = ListProperty([0, 0, 0])
'''The color of the text outline, in the (r, g, b) format.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`outline_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0, 0, 0].
'''
disabled_outline_color = ListProperty([0, 0, 0])
'''The color of the text outline when the widget is disabled, in the
(r, g, b) format.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`disabled_outline_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [0, 0, 0].
'''
texture = ObjectProperty(None, allownone=True)
'''Texture object of the text.
The text is rendered automatically when a property changes. The OpenGL
texture created in this operation is stored in this property. You can use
this :attr:`texture` for any graphics elements.
Depending on the texture creation, the value will be a
:class:`~kivy.graphics.texture.Texture` or
:class:`~kivy.graphics.texture.TextureRegion` object.
.. warning::
The :attr:`texture` update is scheduled for the next frame. If you need
the texture immediately after changing a property, you have to call
the :meth:`texture_update` method before accessing :attr:`texture`::
l = Label(text='Hello world')
# l.texture is good
l.font_size = '50sp'
# l.texture is not updated yet
l.texture_update()
# l.texture is good now.
:attr:`texture` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
texture_size = ListProperty([0, 0])
'''Texture size of the text. The size is determined by the font size and
text. If :attr:`text_size` is [None, None], the texture will be the size
required to fit the text, otherwise it's clipped to fit :attr:`text_size`.
When :attr:`text_size` is [None, None], one can bind to texture_size
and rescale it proportionally to fit the size of the label in order to
make the text fit maximally in the label.
.. warning::
The :attr:`texture_size` is set after the :attr:`texture`
property. If you listen for changes to :attr:`texture`,
:attr:`texture_size` will not be up-to-date in your callback.
Bind to :attr:`texture_size` instead.
'''
mipmap = BooleanProperty(False)
'''Indicates whether OpenGL mipmapping is applied to the texture or not.
Read :ref:`mipmap` for more information.
.. versionadded:: 1.0.7
:attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
shorten = BooleanProperty(False)
'''
Indicates whether the label should attempt to shorten its textual contents
as much as possible if a :attr:`text_size` is given. Setting this to True
without an appropriately set :attr:`text_size` will lead to unexpected
results.
:attr:`shorten_from` and :attr:`split_str` control the direction from
which the :attr:`text` is split, as well as where in the :attr:`text` we
are allowed to split.
:attr:`shorten` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
shorten_from = OptionProperty('center', options=['left', 'center',
'right'])
'''The side from which we should shorten the text from, can be left,
right, or center.
For example, if left, the ellipsis will appear towards the left side and we
will display as much text starting from the right as possible. Similar to
:attr:`shorten`, this option only applies when :attr:`text_size` [0] is
    not None. In this case, the string is shortened to fit within the specified
width.
.. versionadded:: 1.9.0
:attr:`shorten_from` is a :class:`~kivy.properties.OptionProperty` and
defaults to `center`.
'''
split_str = StringProperty('')
'''The string used to split the :attr:`text` while shortening the string
when :attr:`shorten` is True.
For example, if it's a space, the string will be broken into words and as
many whole words that can fit into a single line will be displayed. If
:attr:`split_str` is the empty string, `''`, we split on every character
fitting as much text as possible into the line.
.. versionadded:: 1.9.0
:attr:`split_str` is a :class:`~kivy.properties.StringProperty` and
defaults to `''` (the empty string).
'''
unicode_errors = OptionProperty(
'replace', options=('strict', 'replace', 'ignore'))
'''How to handle unicode decode errors. Can be `'strict'`, `'replace'` or
`'ignore'`.
.. versionadded:: 1.9.0
:attr:`unicode_errors` is an :class:`~kivy.properties.OptionProperty` and
defaults to `'replace'`.
'''
markup = BooleanProperty(False)
'''
.. versionadded:: 1.1.0
If True, the text will be rendered using the
:class:`~kivy.core.text.markup.MarkupLabel`: you can change the
style of the text using tags. Check the
:doc:`api-kivy.core.text.markup` documentation for more information.
:attr:`markup` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
refs = DictProperty({})
'''
.. versionadded:: 1.1.0
List of ``[ref=xxx]`` markup items in the text with the bounding box of
all the words contained in a ref, available only after rendering.
For example, if you wrote::
Check out my [ref=hello]link[/ref]
The refs will be set with::
{'hello': ((64, 0, 78, 16), )}
The references marked "hello" have a bounding box at (x1, y1, x2, y2).
These co-ordinates are relative to the top left corner of the text, with
the y value increasing downwards. You can define multiple refs with the same
name: each occurrence will be added as another (x1, y1, x2, y2) tuple to
this list.
The current Label implementation uses these references if they exist in
your markup text, automatically doing the collision with the touch and
dispatching an `on_ref_press` event.
You can bind a ref event like this::
def print_it(instance, value):
            print('User clicked on', value)
widget = Label(text='Hello [ref=world]World[/ref]', markup=True)
        widget.bind(on_ref_press=print_it)
.. note::
This works only with markup text. You need :attr:`markup` set to
True.
'''
anchors = DictProperty({})
'''
.. versionadded:: 1.1.0
Position of all the ``[anchor=xxx]`` markup in the text.
These co-ordinates are relative to the top left corner of the text, with
the y value increasing downwards. Anchors names should be unique and only
the first occurrence of any duplicate anchors will be recorded.
You can place anchors in your markup text as follows::
text = """
[anchor=title1][size=24]This is my Big title.[/size]
[anchor=content]Hello world
"""
Then, all the ``[anchor=]`` references will be removed and you'll get all
the anchor positions in this property (only after rendering)::
>>> widget = Label(text=text, markup=True)
>>> widget.texture_update()
>>> widget.anchors
{"content": (20, 32), "title1": (20, 16)}
.. note::
This works only with markup text. You need :attr:`markup` set to
True.
'''
max_lines = NumericProperty(0)
'''Maximum number of lines to use, defaults to 0, which means unlimited.
Please note that :attr:`shorten` takes precedence over this property (with
shorten, the text is always rendered on a single line).
.. versionadded:: 1.8.0
:attr:`max_lines` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
strip = BooleanProperty(False)
'''Whether leading and trailing spaces and newlines should be stripped from
each displayed line. If True, every line will start at the right or left
edge, depending on :attr:`halign`. If :attr:`halign` is `justify`, it is
implicitly True.
.. versionadded:: 1.9.0
:attr:`strip` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
font_hinting = OptionProperty(
'normal', options=[None, 'normal', 'light', 'mono'], allownone=True)
'''What hinting option to use for font rendering.
Can be one of `'normal'`, `'light'`, `'mono'` or None.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`font_hinting` is an :class:`~kivy.properties.OptionProperty` and
defaults to `'normal'`.
'''
font_kerning = BooleanProperty(True)
'''Whether kerning is enabled for font rendering.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`font_kerning` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
font_blended = BooleanProperty(True)
'''Whether blended or solid font rendering should be used.
.. note::
This feature requires the SDL2 text provider.
.. versionadded:: 1.9.2
:attr:`font_blended` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
|
py | b410850b4b900c736a6d6bbf2bbdceaf8cc1e644 | # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import oslo_i18n
from oslo_i18n import _lazy
# The domain is the name of the App which is used to generate the folder
# containing the translation files (i.e. the .pot file and the various locales)
DOMAIN = "watcher"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
_C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
def lazy_translation_enabled():
return _lazy.USE_LAZY
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
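# A minimal usage sketch (assumed import path, not part of this module):
#   from watcher._i18n import _
#   message = _("Audit template not found")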
|
py | b41085b9a79828d654e7ef9e645d0596c024f956 | # Generated by Django 3.1.3 on 2021-01-17 13:16
from django.db import migrations
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
('jobs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='job',
name='price_currency',
field=djmoney.models.fields.CurrencyField(choices=[('XUA', 'ADB Unit of Account'), ('AFN', 'Afghani'), ('DZD', 'Algerian Dinar'), ('ARS', 'Argentine Peso'), ('AMD', 'Armenian Dram'), ('AWG', 'Aruban Guilder'), ('AUD', 'Australian Dollar'), ('AZN', 'Azerbaijanian Manat'), ('BSD', 'Bahamian Dollar'), ('BHD', 'Bahraini Dinar'), ('THB', 'Baht'), ('PAB', 'Balboa'), ('BBD', 'Barbados Dollar'), ('BYN', 'Belarussian Ruble'), ('BYR', 'Belarussian Ruble'), ('BZD', 'Belize Dollar'), ('BMD', 'Bermudian Dollar (customarily known as Bermuda Dollar)'), ('BTN', 'Bhutanese ngultrum'), ('VEF', 'Bolivar Fuerte'), ('BOB', 'Boliviano'), ('XBA', 'Bond Markets Units European Composite Unit (EURCO)'), ('BRL', 'Brazilian Real'), ('BND', 'Brunei Dollar'), ('BGN', 'Bulgarian Lev'), ('BIF', 'Burundi Franc'), ('XOF', 'CFA Franc BCEAO'), ('XAF', 'CFA franc BEAC'), ('XPF', 'CFP Franc'), ('CAD', 'Canadian Dollar'), ('CVE', 'Cape Verde Escudo'), ('KYD', 'Cayman Islands Dollar'), ('CLP', 'Chilean peso'), ('XTS', 'Codes specifically reserved for testing purposes'), ('COP', 'Colombian peso'), ('KMF', 'Comoro Franc'), ('CDF', 'Congolese franc'), ('BAM', 'Convertible Marks'), ('NIO', 'Cordoba Oro'), ('CRC', 'Costa Rican Colon'), ('HRK', 'Croatian Kuna'), ('CUP', 'Cuban Peso'), ('CUC', 'Cuban convertible peso'), ('CZK', 'Czech Koruna'), ('GMD', 'Dalasi'), ('DKK', 'Danish Krone'), ('MKD', 'Denar'), ('DJF', 'Djibouti Franc'), ('STD', 'Dobra'), ('DOP', 'Dominican Peso'), ('VND', 'Dong'), ('XCD', 'East Caribbean Dollar'), ('EGP', 'Egyptian Pound'), ('SVC', 'El Salvador Colon'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('XBB', 'European Monetary Unit (E.M.U.-6)'), ('XBD', 'European Unit of Account 17(E.U.A.-17)'), ('XBC', 'European Unit of Account 9(E.U.A.-9)'), ('FKP', 'Falkland Islands Pound'), ('FJD', 'Fiji Dollar'), ('HUF', 'Forint'), ('GHS', 'Ghana Cedi'), ('GIP', 'Gibraltar Pound'), ('XAU', 'Gold'), ('XFO', 'Gold-Franc'), ('PYG', 'Guarani'), ('GNF', 'Guinea Franc'), ('GYD', 'Guyana Dollar'), ('HTG', 'Haitian gourde'), ('HKD', 'Hong Kong Dollar'), ('UAH', 'Hryvnia'), ('ISK', 'Iceland Krona'), ('INR', 'Indian Rupee'), ('IRR', 'Iranian Rial'), ('IQD', 'Iraqi Dinar'), ('IMP', 'Isle of Man Pound'), ('JMD', 'Jamaican Dollar'), ('JOD', 'Jordanian Dinar'), ('KES', 'Kenyan Shilling'), ('PGK', 'Kina'), ('LAK', 'Kip'), ('KWD', 'Kuwaiti Dinar'), ('AOA', 'Kwanza'), ('MMK', 'Kyat'), ('GEL', 'Lari'), ('LVL', 'Latvian Lats'), ('LBP', 'Lebanese Pound'), ('ALL', 'Lek'), ('HNL', 'Lempira'), ('SLL', 'Leone'), ('LSL', 'Lesotho loti'), ('LRD', 'Liberian Dollar'), ('LYD', 'Libyan Dinar'), ('SZL', 'Lilangeni'), ('LTL', 'Lithuanian Litas'), ('MGA', 'Malagasy Ariary'), ('MWK', 'Malawian Kwacha'), ('MYR', 'Malaysian Ringgit'), ('TMM', 'Manat'), ('MUR', 'Mauritius Rupee'), ('MZN', 'Metical'), ('MXV', 'Mexican Unidad de Inversion (UDI)'), ('MXN', 'Mexican peso'), ('MDL', 'Moldovan Leu'), ('MAD', 'Moroccan Dirham'), ('BOV', 'Mvdol'), ('NGN', 'Naira'), ('ERN', 'Nakfa'), ('NAD', 'Namibian Dollar'), ('NPR', 'Nepalese Rupee'), ('ANG', 'Netherlands Antillian Guilder'), ('ILS', 'New Israeli Sheqel'), ('RON', 'New Leu'), ('TWD', 'New Taiwan Dollar'), ('NZD', 'New Zealand Dollar'), ('KPW', 'North Korean Won'), ('NOK', 'Norwegian Krone'), ('PEN', 'Nuevo Sol'), ('MRO', 'Ouguiya'), ('TOP', 'Paanga'), ('PKR', 'Pakistan Rupee'), ('XPD', 'Palladium'), ('MOP', 'Pataca'), ('PHP', 'Philippine Peso'), ('XPT', 'Platinum'), ('GBP', 'Pound Sterling'), ('BWP', 'Pula'), ('QAR', 'Qatari Rial'), ('GTQ', 'Quetzal'), ('ZAR', 'Rand'), ('OMR', 'Rial Omani'), ('KHR', 'Riel'), 
('MVR', 'Rufiyaa'), ('IDR', 'Rupiah'), ('RUB', 'Russian Ruble'), ('RWF', 'Rwanda Franc'), ('XDR', 'SDR'), ('SHP', 'Saint Helena Pound'), ('SAR', 'Saudi Riyal'), ('RSD', 'Serbian Dinar'), ('SCR', 'Seychelles Rupee'), ('XAG', 'Silver'), ('SGD', 'Singapore Dollar'), ('SBD', 'Solomon Islands Dollar'), ('KGS', 'Som'), ('SOS', 'Somali Shilling'), ('TJS', 'Somoni'), ('SSP', 'South Sudanese Pound'), ('LKR', 'Sri Lanka Rupee'), ('XSU', 'Sucre'), ('SDG', 'Sudanese Pound'), ('SRD', 'Surinam Dollar'), ('SEK', 'Swedish Krona'), ('CHF', 'Swiss Franc'), ('SYP', 'Syrian Pound'), ('BDT', 'Taka'), ('WST', 'Tala'), ('TZS', 'Tanzanian Shilling'), ('KZT', 'Tenge'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('TTD', 'Trinidad and Tobago Dollar'), ('MNT', 'Tugrik'), ('TND', 'Tunisian Dinar'), ('TRY', 'Turkish Lira'), ('TMT', 'Turkmenistan New Manat'), ('TVD', 'Tuvalu dollar'), ('AED', 'UAE Dirham'), ('XFU', 'UIC-Franc'), ('USD', 'US Dollar'), ('USN', 'US Dollar (Next day)'), ('UGX', 'Uganda Shilling'), ('CLF', 'Unidad de Fomento'), ('COU', 'Unidad de Valor Real'), ('UYI', 'Uruguay Peso en Unidades Indexadas (URUIURUI)'), ('UYU', 'Uruguayan peso'), ('UZS', 'Uzbekistan Sum'), ('VUV', 'Vatu'), ('CHE', 'WIR Euro'), ('CHW', 'WIR Franc'), ('KRW', 'Won'), ('YER', 'Yemeni Rial'), ('JPY', 'Yen'), ('CNY', 'Yuan Renminbi'), ('ZMK', 'Zambian Kwacha'), ('ZMW', 'Zambian Kwacha'), ('ZWD', 'Zimbabwe Dollar A/06'), ('ZWN', 'Zimbabwe dollar A/08'), ('ZWL', 'Zimbabwe dollar A/09'), ('PLN', 'Zloty')], default='USD', editable=False, max_length=3),
),
migrations.AlterField(
model_name='job',
name='price',
field=djmoney.models.fields.MoneyField(decimal_places=2, default_currency='USD', max_digits=14),
),
]
|
py | b4108720d691507697a24f66ffe6de136e380094 | import pytest
import os
import copy
import numpy as np
import simphony.core as core
import simphony.errors as errors
import simphony.DeviceLibrary.ebeam as dev
import simphony.simulation as sim
class TestNetlist:
def test_4Port_Circuit(self):
gc1 = core.ComponentInstance(dev.ebeam_gc_te1550)
gc2 = core.ComponentInstance(dev.ebeam_gc_te1550)
gc3 = core.ComponentInstance(dev.ebeam_gc_te1550)
gc4 = core.ComponentInstance(dev.ebeam_gc_te1550)
y1 = core.ComponentInstance(dev.ebeam_y_1550)
y2 = core.ComponentInstance(dev.ebeam_y_1550)
y3 = core.ComponentInstance(dev.ebeam_y_1550)
bdc1 = core.ComponentInstance(dev.ebeam_bdc_te1550)
bdc2 = core.ComponentInstance(dev.ebeam_bdc_te1550)
term1 = core.ComponentInstance(dev.ebeam_terminator_te1550)
wg1 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':165.51e-6})
wg2 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':247.73e-6})
wg3 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':642.91e-6})
wg4 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':391.06e-6})
wg5 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':10.45e-6})
wg6 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':10.45e-6})
wg7 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':10.45e-6})
wg8 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':10.45e-6})
wg9 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':162.29e-6})
wg10 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':205.47e-6})
connections = []
connections.append([gc1, 0, wg1, 1])
connections.append([gc3, 0, wg2, 1])
connections.append([bdc1, 3, wg1, 0])
connections.append([bdc1, 2, wg2, 0])
connections.append([gc2, 0, y1, 0])
connections.append([y1, 1, wg3, 0])
connections.append([y1, 2, wg4, 0])
connections.append([y2, 0, wg4, 1])
connections.append([y3, 0, wg3, 1])
connections.append([y2, 1, wg5, 1])
connections.append([bdc1, 0, wg5, 0])
connections.append([bdc1, 1, wg6, 1])
connections.append([y3, 2, wg6, 0])
connections.append([y2, 2, wg7, 0])
connections.append([y3, 1, wg8, 1])
connections.append([bdc2, 2, wg7, 1])
connections.append([bdc2, 3, wg8, 0])
connections.append([bdc2, 0, wg9, 0])
connections.append([term1, 0, wg9, 1])
connections.append([bdc2, 1, wg10, 0])
connections.append([gc4, 0, wg10, 1])
nl = core.Netlist()
nl.load(connections, formatter='ll')
simu = sim.Simulation(nl)
freq = simu.freq_array
two2zero = abs(simu.s_parameters()[:, 2, 0])**2
two2one = abs(simu.s_parameters()[:, 2, 1])**2
two2two = abs(simu.s_parameters()[:, 2, 2])**2
two2three = abs(simu.s_parameters()[:, 2, 3])**2
# np.savez('test_simphony_test_4Port_Circuit', freq=freq, two2zero=two2zero, two2one=two2one, two2two=two2two, two2three=two2three)
expected = np.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'benchmarks', 'test_simphony_test_4Port_Circuit.npz'))
assert np.all(freq == expected['freq'])
assert np.all(two2zero == expected['two2zero'])
assert np.all(two2one == expected['two2one'])
assert np.all(two2two == expected['two2two'])
assert np.all(two2three == expected['two2three'])
# import matplotlib.pyplot as plt
# plt.subplot(221)
# plt.plot(freq, two2zero)
# plt.subplot(222)
# plt.plot(freq, two2one)
# plt.subplot(223)
# plt.plot(freq, two2two)
# plt.subplot(224)
# plt.plot(freq, two2three)
# plt.suptitle("A4")
# plt.show()
def test_mzi(self):
y1 = core.ComponentInstance(dev.ebeam_y_1550)
y2 = core.ComponentInstance(dev.ebeam_y_1550)
wg1 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':50e-6})
wg2 = core.ComponentInstance(dev.ebeam_wg_integral_1550, extras={'length':150e-6})
c1 = [y1, y1, y2, y2]
p1 = [1, 2, 2, 1]
c2 = [wg1, wg2, wg1, wg2]
p2 = [0, 0, 1, 1]
con = zip(c1, p1, c2, p2)
nl = core.Netlist()
nl.load(con, formatter='ll')
simu = sim.Simulation(nl)
freq = simu.freq_array
zero2zero = abs(simu.s_parameters()[:, 0, 0])**2
zero2one = abs(simu.s_parameters()[:, 0, 1])**2
one2zero = abs(simu.s_parameters()[:, 1, 0])**2
one2one = abs(simu.s_parameters()[:, 1, 1])**2
# np.savez('test_simphony_test_mzi', freq=freq, zero2zero=zero2zero, zero2one=zero2one, one2zero=one2zero, one2one=one2one)
expected = np.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'benchmarks', 'test_simphony_test_mzi.npz'))
assert np.all(freq == expected['freq'])
assert np.all(zero2zero == expected['zero2zero'])
assert np.all(zero2one == expected['zero2one'])
assert np.all(one2zero == expected['one2zero'])
assert np.all(one2one == expected['one2one'])
# import matplotlib.pyplot as plt
# plt.subplot(221)
# plt.plot(freq, zero2zero)
# plt.subplot(222)
# plt.plot(freq, zero2one)
# plt.subplot(223)
# plt.plot(freq, one2zero)
# plt.subplot(224)
# plt.plot(freq, one2one)
# plt.suptitle("MZI")
# plt.show() |
py | b41087dcb8a8d1773a33c9579d1542b8f449f247 | import os
from lint import Linter
def find_files(root, ext):
root = root.rstrip(os.sep) + os.sep
ret = []
for base, dirs, names in os.walk(root):
for name in names:
if name.endswith(ext):
base = base.replace(root, '', 1)
ret.append(os.path.join(base, name))
return ret
class Golang(Linter):
language = 'go'
cmd = ('go', 'build', '-gcflags', '-e -N')
regex = r'.+?:(?P<line>\d+): (?P<error>.+)'
def run(self, cmd, code):
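        # With no saved filename, compile just the buffer with the raw go tool
        # compiler (6g/8g); otherwise build the whole package directory so that
        # cross-file references resolve.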
code = code.encode('utf8')
if not self.filename:
tools = self.popen(('go', 'tool')).communicate()[0].split('\n')
for compiler in ('6g', '8g'):
if compiler in tools:
return self.tmpfile(('go', 'tool', compiler, '-e', '-o', os.devnull), code, suffix='.go')
else:
path = os.path.split(self.filename)[0]
os.chdir(path)
files = find_files(path, '.go')
answer = self.tmpdir(cmd, files, code)
return answer
|
py | b41088146ceb844778cd8eb70517c8828791d1bc | # Author: AKHILESH SANTOSHWAR
# this program illustrates finding the longest CONTINUOUS ODD subsequence
# INPUT: [2, 6, 8, 3, 9, 1, 5, 6, 1, 3, 5, 7, 7, 1, 2, 3, 4, 5]
# OUTPUT: [1, 3, 5, 7, 7, 1] 6
# in short we have to find the elements and length of the longest continuous odd subsequence
def longest_continuous_odd_subsequence(array):
final_list = []
temp_list = []
for i in array:
        # check whether the element is even (an even element ends the current odd run)
        if i % 2 == 0:
            # if the element is even and our temp_list is not empty, append temp_list to our result list
if temp_list != []:
final_list.append(temp_list)
temp_list = []
else:
# if element is odd, append it to our temp_list
temp_list.append(i)
# if temp_list is not empty at the last iteration, add it to the final_list
if temp_list != []:
final_list.append(temp_list)
# print the maximum list based on its length
result = max(final_list, key=len)
print(result, len(result))
if __name__ == '__main__':
array = [2, 6, 8, 3, 9, 1, 5, 6, 1, 3, 5, 7, 7, 1, 2, 3, 4, 5]
longest_continuous_odd_subsequence(array)
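# A minimal alternative sketch (not part of the original program), grouping
# consecutive elements by parity with itertools.groupby:
#   from itertools import groupby
#   runs = [list(g) for is_odd, g in groupby(array, key=lambda x: x % 2) if is_odd]
#   longest = max(runs, key=len)  # -> [1, 3, 5, 7, 7, 1]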
|
py | b410886140710c75917caa4e8500c864c1293991 | import math
import numbers
import random
import warnings
from collections.abc import Sequence
from typing import Tuple, List, Optional
import torch
from torch import Tensor
try:
import accimage
except ImportError:
accimage = None
from . import functional as F
from .functional import InterpolationMode, _interpolation_modes_from_int
__all__ = ["Compose", "ToTensor", "PILToTensor", "ConvertImageDtype", "ToPILImage", "Normalize", "Resize", "Scale",
"CenterCrop", "Pad", "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop",
"RandomHorizontalFlip", "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop",
"LinearTransformation", "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
"RandomPerspective", "RandomErasing", "GaussianBlur", "InterpolationMode", "RandomInvert", "RandomPosterize",
"RandomSolarize", "RandomAdjustSharpness", "RandomAutocontrast", "RandomEqualize"]
class Compose:
"""Composes several transforms together. This transform does not support torchscript.
Please, see the note below.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
.. note::
In order to script the transformations, please use ``torch.nn.Sequential`` as below.
>>> transforms = torch.nn.Sequential(
>>> transforms.CenterCrop(10),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> )
>>> scripted_transforms = torch.jit.script(transforms)
Make sure to use only scriptable transformations, i.e. ones that work with ``torch.Tensor`` and do not require
`lambda` functions or ``PIL.Image``.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ToTensor:
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/master/references/segmentation
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
def __repr__(self):
return self.__class__.__name__ + '()'
class PILToTensor:
"""Convert a ``PIL Image`` to a tensor of the same type. This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.pil_to_tensor(pic)
def __repr__(self):
return self.__class__.__name__ + '()'
class ConvertImageDtype(torch.nn.Module):
"""Convert a tensor image to the given ``dtype`` and scale the values accordingly
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
def __init__(self, dtype: torch.dtype) -> None:
super().__init__()
self.dtype = dtype
def forward(self, image):
return F.convert_image_dtype(image, self.dtype)
class ToPILImage:
"""Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
def __init__(self, mode=None):
self.mode = mode
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
Returns:
PIL Image: Image converted to PIL Image.
"""
return F.to_pil_image(pic, self.mode)
def __repr__(self):
format_string = self.__class__.__name__ + '('
if self.mode is not None:
format_string += 'mode={0}'.format(self.mode)
format_string += ')'
return format_string
class Normalize(torch.nn.Module):
"""Normalize a tensor image with mean and standard deviation.
This transform does not support PIL Image.
Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
channels, this transform will normalize each channel of the input
``torch.*Tensor`` i.e.,
``output[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation in-place.
"""
def __init__(self, mean, std, inplace=False):
super().__init__()
self.mean = mean
self.std = std
self.inplace = inplace
def forward(self, tensor: Tensor) -> Tensor:
"""
Args:
tensor (Tensor): Tensor image to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std, self.inplace)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
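# A minimal usage sketch (assumed values, not part of this module): normalize a
# 3-channel float tensor with the commonly used ImageNet statistics.
#   norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   out = norm(torch.rand(3, 224, 224))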
class Resize(torch.nn.Module):
"""Resize the input image to the given size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
.. warning::
The output image might be different depending on its type: when downsampling, the interpolation of PIL images
and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
types.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e., if height > width, then the image will be rescaled to
(size * height / width, size).
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
max_size (int, optional): The maximum allowed for the longer edge of
the resized image: if the longer edge of the image is greater
than ``max_size`` after being resized according to ``size``, then
the image is resized again so that the longer edge is equal to
``max_size``. As a result, ``size`` might be overruled, i.e. the
smaller edge may be shorter than ``size``. This is only supported
if ``size`` is an int (or a sequence of length 1 in torchscript
mode).
"""
def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None):
super().__init__()
if not isinstance(size, (int, Sequence)):
raise TypeError("Size should be int or sequence. Got {}".format(type(size)))
if isinstance(size, Sequence) and len(size) not in (1, 2):
raise ValueError("If size is a sequence, it should have 1 or 2 values")
self.size = size
self.max_size = max_size
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be scaled.
Returns:
PIL Image or Tensor: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation, self.max_size)
def __repr__(self):
interpolate_str = self.interpolation.value
return self.__class__.__name__ + '(size={0}, interpolation={1}, max_size={2})'.format(
self.size, interpolate_str, self.max_size)
class Scale(Resize):
"""
Note: This transform is deprecated in favor of Resize.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.Scale transform is deprecated, " +
"please use transforms.Resize instead.")
super(Scale, self).__init__(*args, **kwargs)
class CenterCrop(torch.nn.Module):
"""Crops the given image at the center.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
"""
def __init__(self, size):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
return F.center_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class Pad(torch.nn.Module):
"""Pad the given image on all sides with the given "pad" value.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
at most 3 leading dimensions for mode edge,
and an arbitrary number of leading dimensions for mode constant
Args:
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or str or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image,
if input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2
- reflect: pads with reflection of image without repeating the last value on the edge
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self, padding, fill=0, padding_mode="constant"):
super().__init__()
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError("Got inappropriate fill arg")
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
self.padding = padding
self.fill = fill
self.padding_mode = padding_mode
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be padded.
Returns:
PIL Image or Tensor: Padded image.
"""
return F.pad(img, self.padding, self.fill, self.padding_mode)
def __repr__(self):
return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
format(self.padding, self.fill, self.padding_mode)
class Lambda:
"""Apply a user-defined lambda as a transform. This transform does not support torchscript.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
if not callable(lambd):
raise TypeError("Argument lambd should be callable, got {}".format(repr(type(lambd).__name__)))
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self):
return self.__class__.__name__ + '()'
class RandomTransforms:
"""Base class for a list of transformations with randomness
Args:
transforms (sequence): list of transformations
"""
def __init__(self, transforms):
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence")
self.transforms = transforms
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomApply(torch.nn.Module):
"""Apply randomly a list of transformations with a given probability.
.. note::
In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of
transforms as shown below:
>>> transforms = transforms.RandomApply(torch.nn.ModuleList([
>>> transforms.ColorJitter(),
>>> ]), p=0.3)
>>> scripted_transforms = torch.jit.script(transforms)
Make sure to use only scriptable transformations, i.e. ones that work with ``torch.Tensor`` and do not require
`lambda` functions or ``PIL.Image``.
Args:
transforms (sequence or torch.nn.Module): list of transformations
p (float): probability
"""
def __init__(self, transforms, p=0.5):
super().__init__()
self.transforms = transforms
self.p = p
def forward(self, img):
if self.p < torch.rand(1):
return img
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += '\n p={}'.format(self.p)
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomOrder(RandomTransforms):
"""Apply a list of transformations in a random order. This transform does not support torchscript.
"""
def __call__(self, img):
order = list(range(len(self.transforms)))
random.shuffle(order)
for i in order:
img = self.transforms[i](img)
return img
class RandomChoice(RandomTransforms):
"""Apply single transformation randomly picked from a list. This transform does not support torchscript.
"""
def __call__(self, img):
t = random.choice(self.transforms)
return t(img)
class RandomCrop(torch.nn.Module):
"""Crop the given image at a random location.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions,
but if non-constant padding is used, the input is expected to have at most 2 leading dimensions
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
padding (int or sequence, optional): Optional padding on each border
of the image. Default is None. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception. Since cropping is done
after padding, the padding seems to be done at a random offset.
fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or str or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
@staticmethod
def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = F._get_image_size(img)
th, tw = output_size
if h + 1 < th or w + 1 < tw:
raise ValueError(
"Required crop size {} is larger then input image size {}".format((th, tw), (h, w))
)
if w == tw and h == th:
return 0, 0, h, w
i = torch.randint(0, h - th + 1, size=(1, )).item()
j = torch.randint(0, w - tw + 1, size=(1, )).item()
return i, j, th, tw
def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
super().__init__()
self.size = tuple(_setup_size(
size, error_msg="Please provide only two dimensions (h, w) for size."
))
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
width, height = F._get_image_size(img)
# pad the width if needed
if self.pad_if_needed and width < self.size[1]:
padding = [self.size[1] - width, 0]
img = F.pad(img, padding, self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and height < self.size[0]:
padding = [0, self.size[0] - height]
img = F.pad(img, padding, self.fill, self.padding_mode)
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding)
class RandomHorizontalFlip(torch.nn.Module):
"""Horizontally flip the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be flipped.
Returns:
PIL Image or Tensor: Randomly flipped image.
"""
if torch.rand(1) < self.p:
return F.hflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(torch.nn.Module):
"""Vertically flip the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be flipped.
Returns:
PIL Image or Tensor: Randomly flipped image.
"""
if torch.rand(1) < self.p:
return F.vflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomPerspective(torch.nn.Module):
"""Performs a random perspective transformation of the given image with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
Default is 0.5.
p (float): probability of the image being transformed. Default is 0.5.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
fill (sequence or number): Pixel fill value for the area outside the transformed
image. Default is ``0``. If given a number, the value is used for all bands respectively.
If input is PIL Image, this option is only available for ``Pillow>=5.0.0``.
"""
def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):
super().__init__()
self.p = p
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
self.distortion_scale = distortion_scale
if fill is None:
fill = 0
elif not isinstance(fill, (Sequence, numbers.Number)):
raise TypeError("Fill should be either a sequence or a number.")
self.fill = fill
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be Perspectively transformed.
Returns:
PIL Image or Tensor: Randomly transformed image.
"""
fill = self.fill
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * F._get_image_num_channels(img)
else:
fill = [float(f) for f in fill]
if torch.rand(1) < self.p:
width, height = F._get_image_size(img)
startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
return F.perspective(img, startpoints, endpoints, self.interpolation, fill)
return img
@staticmethod
def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:
"""Get parameters for ``perspective`` for a random perspective transform.
Args:
width (int): width of the image.
height (int): height of the image.
distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
Returns:
List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
"""
half_height = height // 2
half_width = width // 2
topleft = [
int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),
int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
]
topright = [
int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),
int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
]
botright = [
int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),
int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
]
botleft = [
int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),
int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
]
startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]
endpoints = [topleft, topright, botright, botleft]
return startpoints, endpoints
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomResizedCrop(torch.nn.Module):
"""Crop a random portion of image and resize it to a given size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
A crop of the original image is made: the crop has a random area (H * W)
and a random aspect ratio. This crop is finally resized to the given
size. This is popularly used to train the Inception networks.
Args:
size (int or sequence): expected output size of the crop, for each edge. If size is an
int instead of sequence like (h, w), a square output size ``(size, size)`` is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
scale (tuple of float): Specifies the lower and upper bounds for the random area of the crop,
before resizing. The scale is defined with respect to the area of the original image.
ratio (tuple of float): lower and upper bounds for the random aspect ratio of the crop, before
resizing.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=InterpolationMode.BILINEAR):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
if not isinstance(scale, Sequence):
raise TypeError("Scale should be a sequence")
if not isinstance(ratio, Sequence):
raise TypeError("Ratio should be a sequence")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(
img: Tensor, scale: List[float], ratio: List[float]
) -> Tuple[int, int, int, int]:
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image or Tensor): Input image.
scale (list): range of scale of the origin size cropped
ratio (list): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
width, height = F._get_image_size(img)
area = height * width
log_ratio = torch.log(torch.tensor(ratio))
for _ in range(10):
target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
aspect_ratio = torch.exp(
torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
).item()
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = torch.randint(0, height - h + 1, size=(1,)).item()
j = torch.randint(0, width - w + 1, size=(1,)).item()
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped and resized.
Returns:
PIL Image or Tensor: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
def __repr__(self):
interpolate_str = self.interpolation.value
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
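# A minimal usage sketch (assumed values, not part of this module): crop a
# random region and resize it to the training resolution.
#   rrc = RandomResizedCrop(224, scale=(0.5, 1.0))
#   out = rrc(torch.rand(3, 256, 320))  # -> tensor of shape (3, 224, 224)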
class RandomSizedCrop(RandomResizedCrop):
"""
Note: This transform is deprecated in favor of RandomResizedCrop.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
"please use transforms.RandomResizedCrop instead.")
super(RandomSizedCrop, self).__init__(*args, **kwargs)
class FiveCrop(torch.nn.Module):
"""Crop the given image into four corners and the central crop.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an ``int``
instead of sequence like (h, w), a square crop of size (size, size) is made.
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
Example:
>>> transform = Compose([
>>> FiveCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
tuple of 5 images. Image can be PIL Image or Tensor
"""
return F.five_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class TenCrop(torch.nn.Module):
"""Crop the given image into four corners and the central crop plus the flipped version of
these (horizontal flipping is used by default).
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
vertical_flip (bool): Use vertical flipping instead of horizontal
Example:
>>> transform = Compose([
>>> TenCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size, vertical_flip=False):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
self.vertical_flip = vertical_flip
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
tuple of 10 images. Image can be PIL Image or Tensor
"""
return F.ten_crop(img, self.size, self.vertical_flip)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
class LinearTransformation(torch.nn.Module):
"""Transform a tensor image with a square transformation matrix and a mean_vector computed
offline.
This transform does not support PIL Image.
Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
subtract mean_vector from it which is then followed by computing the dot
product with the transformation matrix and then reshaping the tensor to its
original shape.
Applications:
whitening transformation: Suppose X is a column vector zero-centered data.
Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
perform SVD on this matrix and pass it as transformation_matrix.
Args:
transformation_matrix (Tensor): tensor [D x D], D = C x H x W
mean_vector (Tensor): tensor [D], D = C x H x W
"""
def __init__(self, transformation_matrix, mean_vector):
super().__init__()
if transformation_matrix.size(0) != transformation_matrix.size(1):
raise ValueError("transformation_matrix should be square. Got " +
"[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
if mean_vector.size(0) != transformation_matrix.size(0):
raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
" as any one of the dimensions of the transformation_matrix [{}]"
.format(tuple(transformation_matrix.size())))
if transformation_matrix.device != mean_vector.device:
raise ValueError("Input tensors should be on the same device. Got {} and {}"
.format(transformation_matrix.device, mean_vector.device))
self.transformation_matrix = transformation_matrix
self.mean_vector = mean_vector
def forward(self, tensor: Tensor) -> Tensor:
"""
Args:
tensor (Tensor): Tensor image to be whitened.
Returns:
Tensor: Transformed image.
"""
shape = tensor.shape
n = shape[-3] * shape[-2] * shape[-1]
if n != self.transformation_matrix.shape[0]:
raise ValueError("Input tensor and transformation matrix have incompatible shape." +
"[{} x {} x {}] != ".format(shape[-3], shape[-2], shape[-1]) +
"{}".format(self.transformation_matrix.shape[0]))
if tensor.device.type != self.mean_vector.device.type:
raise ValueError("Input tensor should be on the same device as transformation matrix and mean vector. "
"Got {} vs {}".format(tensor.device, self.mean_vector.device))
flat_tensor = tensor.view(-1, n) - self.mean_vector
transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
tensor = transformed_tensor.view(shape)
return tensor
def __repr__(self):
format_string = self.__class__.__name__ + '(transformation_matrix='
format_string += (str(self.transformation_matrix.tolist()) + ')')
format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')')
return format_string
class ColorJitter(torch.nn.Module):
"""Randomly change the brightness, contrast, saturation and hue of an image.
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, mode "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
super().__init__()
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
@torch.jit.unused
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - float(value), center + float(value)]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
def get_params(brightness: Optional[List[float]],
contrast: Optional[List[float]],
saturation: Optional[List[float]],
hue: Optional[List[float]]
) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:
"""Get the parameters for the randomized transform to be applied on image.
Args:
brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen
uniformly. Pass None to turn off the transformation.
contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen
uniformly. Pass None to turn off the transformation.
saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen
uniformly. Pass None to turn off the transformation.
hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.
Pass None to turn off the transformation.
Returns:
tuple: The parameters used to apply the randomized transform
along with their random order.
"""
fn_idx = torch.randperm(4)
b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))
c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))
s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))
h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))
return fn_idx, b, c, s, h
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Input image.
Returns:
PIL Image or Tensor: Color jittered image.
"""
fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \
self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
for fn_id in fn_idx:
if fn_id == 0 and brightness_factor is not None:
img = F.adjust_brightness(img, brightness_factor)
elif fn_id == 1 and contrast_factor is not None:
img = F.adjust_contrast(img, contrast_factor)
elif fn_id == 2 and saturation_factor is not None:
img = F.adjust_saturation(img, saturation_factor)
elif fn_id == 3 and hue_factor is not None:
img = F.adjust_hue(img, hue_factor)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
class RandomRotation(torch.nn.Module):
"""Rotate the image by angle.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
Default is the center of the image.
fill (sequence or number): Pixel fill value for the area outside the rotated
image. Default is ``0``. If given a number, the value is used for all bands respectively.
            If input is PIL Image, the option is only available for ``Pillow>=5.2.0``.
resample (int, optional): deprecated argument and will be removed since v0.10.0.
Please use the ``interpolation`` parameter instead.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(
self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None
):
super().__init__()
if resample is not None:
warnings.warn(
"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
)
interpolation = _interpolation_modes_from_int(resample)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, ))
if center is not None:
_check_sequence_input(center, "center", req_sizes=(2, ))
self.center = center
self.resample = self.interpolation = interpolation
self.expand = expand
if fill is None:
fill = 0
elif not isinstance(fill, (Sequence, numbers.Number)):
raise TypeError("Fill should be either a sequence or a number.")
self.fill = fill
@staticmethod
def get_params(degrees: List[float]) -> float:
"""Get parameters for ``rotate`` for a random rotation.
Returns:
float: angle parameter to be passed to ``rotate`` for random rotation.
"""
angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
return angle
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be rotated.
Returns:
PIL Image or Tensor: Rotated image.
"""
fill = self.fill
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * F._get_image_num_channels(img)
else:
fill = [float(f) for f in fill]
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center, fill)
def __repr__(self):
interpolate_str = self.interpolation.value
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', interpolation={0}'.format(interpolate_str)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
if self.fill is not None:
format_string += ', fill={0}'.format(self.fill)
format_string += ')'
return format_string
class RandomAffine(torch.nn.Module):
"""Random affine transformation of the image keeping center invariant.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or number, optional): Range of degrees to select from.
If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
will be applied. Else if shear is a sequence of 2 values a shear parallel to the x axis in the
range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,
a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
Will not apply shear by default.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
fill (sequence or number): Pixel fill value for the area outside the transformed
image. Default is ``0``. If given a number, the value is used for all bands respectively.
            If input is PIL Image, the option is only available for ``Pillow>=5.0.0``.
fillcolor (sequence or number, optional): deprecated argument and will be removed since v0.10.0.
Please use the ``fill`` parameter instead.
resample (int, optional): deprecated argument and will be removed since v0.10.0.
Please use the ``interpolation`` parameter instead.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(
self, degrees, translate=None, scale=None, shear=None, interpolation=InterpolationMode.NEAREST, fill=0,
fillcolor=None, resample=None
):
super().__init__()
if resample is not None:
warnings.warn(
"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
)
interpolation = _interpolation_modes_from_int(resample)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
if fillcolor is not None:
warnings.warn(
"Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead"
)
fill = fillcolor
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, ))
if translate is not None:
_check_sequence_input(translate, "translate", req_sizes=(2, ))
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
_check_sequence_input(scale, "scale", req_sizes=(2, ))
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4))
else:
self.shear = shear
self.resample = self.interpolation = interpolation
if fill is None:
fill = 0
elif not isinstance(fill, (Sequence, numbers.Number)):
raise TypeError("Fill should be either a sequence or a number.")
self.fillcolor = self.fill = fill
@staticmethod
def get_params(
degrees: List[float],
translate: Optional[List[float]],
scale_ranges: Optional[List[float]],
shears: Optional[List[float]],
img_size: List[int]
) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:
"""Get parameters for affine transformation
Returns:
params to be passed to the affine transformation
"""
angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
if translate is not None:
max_dx = float(translate[0] * img_size[0])
max_dy = float(translate[1] * img_size[1])
tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))
ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))
translations = (tx, ty)
else:
translations = (0, 0)
if scale_ranges is not None:
scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())
else:
scale = 1.0
shear_x = shear_y = 0.0
if shears is not None:
shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())
if len(shears) == 4:
shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())
shear = (shear_x, shear_y)
return angle, translations, scale, shear
def forward(self, img):
"""
        Args:
            img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Affine transformed image.
"""
fill = self.fill
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * F._get_image_num_channels(img)
else:
fill = [float(f) for f in fill]
img_size = F._get_image_size(img)
ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)
return F.affine(img, *ret, interpolation=self.interpolation, fill=fill)
def __repr__(self):
s = '{name}(degrees={degrees}'
if self.translate is not None:
s += ', translate={translate}'
if self.scale is not None:
s += ', scale={scale}'
if self.shear is not None:
s += ', shear={shear}'
if self.interpolation != InterpolationMode.NEAREST:
s += ', interpolation={interpolation}'
if self.fill != 0:
s += ', fill={fill}'
s += ')'
d = dict(self.__dict__)
d['interpolation'] = self.interpolation.value
return s.format(name=self.__class__.__name__, **d)
class Grayscale(torch.nn.Module):
"""Convert image to grayscale.
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
num_output_channels (int): (1 or 3) number of channels desired for output image
Returns:
PIL Image: Grayscale version of the input.
- If ``num_output_channels == 1`` : returned image is single channel
- If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b
"""
def __init__(self, num_output_channels=1):
super().__init__()
self.num_output_channels = num_output_channels
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be converted to grayscale.
Returns:
PIL Image or Tensor: Grayscaled image.
"""
return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)
def __repr__(self):
return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
class RandomGrayscale(torch.nn.Module):
"""Randomly convert image to grayscale with a probability of p (default 0.1).
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
p (float): probability that image should be converted to grayscale.
Returns:
PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged
with probability (1-p).
- If input image is 1 channel: grayscale version is 1 channel
- If input image is 3 channel: grayscale version is 3 channel with r == g == b
"""
def __init__(self, p=0.1):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be converted to grayscale.
Returns:
PIL Image or Tensor: Randomly grayscaled image.
"""
num_output_channels = F._get_image_num_channels(img)
if torch.rand(1) < self.p:
return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={0})'.format(self.p)
class RandomErasing(torch.nn.Module):
""" Randomly selects a rectangle region in an torch Tensor image and erases its pixels.
This transform does not support PIL Image.
'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896
Args:
p: probability that the random erasing operation will be performed.
scale: range of proportion of erased area against input image.
ratio: range of aspect ratio of erased area.
value: erasing value. Default is 0. If a single int, it is used to
erase all pixels. If a tuple of length 3, it is used to erase
R, G, B channels respectively.
If a str of 'random', erasing each pixel with random values.
inplace: boolean to make this transform inplace. Default set to False.
Returns:
Erased Image.
Example:
>>> transform = transforms.Compose([
>>> transforms.RandomHorizontalFlip(),
>>> transforms.ToTensor(),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> transforms.RandomErasing(),
>>> ])
"""
def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
super().__init__()
if not isinstance(value, (numbers.Number, str, tuple, list)):
raise TypeError("Argument value should be either a number or str or a sequence")
if isinstance(value, str) and value != "random":
raise ValueError("If value is str, it should be 'random'")
if not isinstance(scale, (tuple, list)):
raise TypeError("Scale should be a sequence")
if not isinstance(ratio, (tuple, list)):
raise TypeError("Ratio should be a sequence")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
if scale[0] < 0 or scale[1] > 1:
raise ValueError("Scale should be between 0 and 1")
if p < 0 or p > 1:
raise ValueError("Random erasing probability should be between 0 and 1")
self.p = p
self.scale = scale
self.ratio = ratio
self.value = value
self.inplace = inplace
@staticmethod
def get_params(
img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None
) -> Tuple[int, int, int, int, Tensor]:
"""Get parameters for ``erase`` for a random erasing.
Args:
img (Tensor): Tensor image to be erased.
scale (sequence): range of proportion of erased area against input image.
ratio (sequence): range of aspect ratio of erased area.
value (list, optional): erasing value. If None, it is interpreted as "random"
(erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,
i.e. ``value[0]``.
Returns:
tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
"""
img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]
area = img_h * img_w
log_ratio = torch.log(torch.tensor(ratio))
for _ in range(10):
erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
aspect_ratio = torch.exp(
torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
).item()
h = int(round(math.sqrt(erase_area * aspect_ratio)))
w = int(round(math.sqrt(erase_area / aspect_ratio)))
if not (h < img_h and w < img_w):
continue
if value is None:
v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
else:
v = torch.tensor(value)[:, None, None]
i = torch.randint(0, img_h - h + 1, size=(1, )).item()
j = torch.randint(0, img_w - w + 1, size=(1, )).item()
return i, j, h, w, v
# Return original image
return 0, 0, img_h, img_w, img
def forward(self, img):
"""
Args:
img (Tensor): Tensor image to be erased.
Returns:
img (Tensor): Erased Tensor image.
"""
if torch.rand(1) < self.p:
# cast self.value to script acceptable type
if isinstance(self.value, (int, float)):
value = [self.value, ]
elif isinstance(self.value, str):
value = None
elif isinstance(self.value, tuple):
value = list(self.value)
else:
value = self.value
if value is not None and not (len(value) in (1, img.shape[-3])):
raise ValueError(
"If value is a sequence, it should have either a single value or "
"{} (number of input channels)".format(img.shape[-3])
)
x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)
return F.erase(img, x, y, h, w, v, self.inplace)
return img
def __repr__(self):
s = '(p={}, '.format(self.p)
s += 'scale={}, '.format(self.scale)
s += 'ratio={}, '.format(self.ratio)
s += 'value={}, '.format(self.value)
s += 'inplace={})'.format(self.inplace)
return self.__class__.__name__ + s
class GaussianBlur(torch.nn.Module):
"""Blurs image with randomly chosen Gaussian blur.
If the image is torch Tensor, it is expected
to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
kernel_size (int or sequence): Size of the Gaussian kernel.
sigma (float or tuple of float (min, max)): Standard deviation to be used for
creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
of float (min, max), sigma is chosen uniformly at random to lie in the
given range.
Returns:
PIL Image or Tensor: Gaussian blurred version of the input image.
"""
def __init__(self, kernel_size, sigma=(0.1, 2.0)):
super().__init__()
self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
for ks in self.kernel_size:
if ks <= 0 or ks % 2 == 0:
raise ValueError("Kernel size value should be an odd and positive number.")
if isinstance(sigma, numbers.Number):
if sigma <= 0:
raise ValueError("If sigma is a single number, it must be positive.")
sigma = (sigma, sigma)
elif isinstance(sigma, Sequence) and len(sigma) == 2:
if not 0. < sigma[0] <= sigma[1]:
raise ValueError("sigma values should be positive and of the form (min, max).")
else:
raise ValueError("sigma should be a single number or a list/tuple with length 2.")
self.sigma = sigma
@staticmethod
def get_params(sigma_min: float, sigma_max: float) -> float:
"""Choose sigma for random gaussian blurring.
Args:
sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.
sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.
Returns:
float: Standard deviation to be passed to calculate kernel for gaussian blurring.
"""
return torch.empty(1).uniform_(sigma_min, sigma_max).item()
def forward(self, img: Tensor) -> Tensor:
"""
Args:
img (PIL Image or Tensor): image to be blurred.
Returns:
PIL Image or Tensor: Gaussian blurred image
"""
sigma = self.get_params(self.sigma[0], self.sigma[1])
return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])
def __repr__(self):
s = '(kernel_size={}, '.format(self.kernel_size)
s += 'sigma={})'.format(self.sigma)
return self.__class__.__name__ + s
def _setup_size(size, error_msg):
if isinstance(size, numbers.Number):
return int(size), int(size)
if isinstance(size, Sequence) and len(size) == 1:
return size[0], size[0]
if len(size) != 2:
raise ValueError(error_msg)
return size
def _check_sequence_input(x, name, req_sizes):
msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes])
if not isinstance(x, Sequence):
raise TypeError("{} should be a sequence of length {}.".format(name, msg))
if len(x) not in req_sizes:
raise ValueError("{} should be sequence of length {}.".format(name, msg))
def _setup_angle(x, name, req_sizes=(2, )):
if isinstance(x, numbers.Number):
if x < 0:
raise ValueError("If {} is a single number, it must be positive.".format(name))
x = [-x, x]
else:
_check_sequence_input(x, name, req_sizes)
return [float(d) for d in x]
class RandomInvert(torch.nn.Module):
"""Inverts the colors of the given image randomly with a given probability.
If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being color inverted. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be inverted.
Returns:
PIL Image or Tensor: Randomly color inverted image.
"""
if torch.rand(1).item() < self.p:
return F.invert(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomPosterize(torch.nn.Module):
"""Posterize the image randomly with a given probability by reducing the
number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,
and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
bits (int): number of bits to keep for each channel (0-8)
        p (float): probability of the image being posterized. Default value is 0.5
"""
def __init__(self, bits, p=0.5):
super().__init__()
self.bits = bits
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be posterized.
Returns:
PIL Image or Tensor: Randomly posterized image.
"""
if torch.rand(1).item() < self.p:
return F.posterize(img, self.bits)
return img
def __repr__(self):
return self.__class__.__name__ + '(bits={},p={})'.format(self.bits, self.p)
class RandomSolarize(torch.nn.Module):
"""Solarize the image randomly with a given probability by inverting all pixel
values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
        threshold (float): all pixels equal to or above this value are inverted.
        p (float): probability of the image being solarized. Default value is 0.5
"""
def __init__(self, threshold, p=0.5):
super().__init__()
self.threshold = threshold
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be solarized.
Returns:
PIL Image or Tensor: Randomly solarized image.
"""
if torch.rand(1).item() < self.p:
return F.solarize(img, self.threshold)
return img
def __repr__(self):
return self.__class__.__name__ + '(threshold={},p={})'.format(self.threshold, self.p)
class RandomAdjustSharpness(torch.nn.Module):
"""Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,
it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
original image while 2 increases the sharpness by a factor of 2.
        p (float): probability of the image being sharpened. Default value is 0.5
"""
def __init__(self, sharpness_factor, p=0.5):
super().__init__()
self.sharpness_factor = sharpness_factor
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be sharpened.
Returns:
PIL Image or Tensor: Randomly sharpened image.
"""
if torch.rand(1).item() < self.p:
return F.adjust_sharpness(img, self.sharpness_factor)
return img
def __repr__(self):
return self.__class__.__name__ + '(sharpness_factor={},p={})'.format(self.sharpness_factor, self.p)
class RandomAutocontrast(torch.nn.Module):
"""Autocontrast the pixels of the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being autocontrasted. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be autocontrasted.
Returns:
PIL Image or Tensor: Randomly autocontrasted image.
"""
if torch.rand(1).item() < self.p:
return F.autocontrast(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomEqualize(torch.nn.Module):
"""Equalize the histogram of the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
Args:
p (float): probability of the image being equalized. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be equalized.
Returns:
PIL Image or Tensor: Randomly equalized image.
"""
if torch.rand(1).item() < self.p:
return F.equalize(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
|
py | b410895b2ce1619cdd1329832ad15fbfa64da851 | import json
import time
import pandas as pd
import numpy as np
import concurrent.futures as cf
from typing import Union, Optional, Dict, List, Tuple
from google.protobuf.json_format import MessageToDict
from requests_futures.sessions import FuturesSession
from arize import public_pb2 as public__pb2
from arize.bounded_executor import BoundedExecutor
from arize.utils.types import ModelTypes
from arize.model import (
TrainingRecords,
ValidationRecords,
)
from arize.utils.utils import (
validate_prediction_timestamps,
bundle_records,
convert_element,
get_value_object,
get_timestamp,
is_timestamp_in_range,
infer_model_type,
get_bulk_records,
)
from arize.__init__ import __version__
def _label_validation(
model_type: ModelTypes, label: Union[str, bool, int, float, Tuple[str, float]]
):
if model_type == ModelTypes.BINARY:
if not (isinstance(label, bool) or label == 0 or label == 1):
raise TypeError(
f"label {label} has type {type(label)}, but must be one a bool, 0 or 1 for ModelTypes.BINARY"
)
elif model_type == ModelTypes.NUMERIC:
if not isinstance(label, (float, int)):
raise TypeError(
f"label {label} has type {type(label)}, but must be either float or int for ModelTypes.NUMERIC"
)
elif label is np.nan:
raise ValueError("label for ModelTypes.NUMERIC cannot be null value")
elif model_type == ModelTypes.CATEGORICAL:
if not isinstance(label, str):
raise TypeError(
f"label {label} has type {type(label)}, but must be str for ModelTypes.CATEGORICAL"
)
elif model_type == ModelTypes.SCORE_CATEGORICAL:
c = isinstance(label, str) or (
isinstance(label, tuple)
and isinstance(label[0], str)
and isinstance(label[1], float)
)
if isinstance(label, tuple) and label[1] is np.nan:
raise ValueError(f"Prediction score for ModelTypes.SCORE_CATEGORICAL cannot be null value")
if not c:
raise TypeError(
f"label {label} has type {type(label)}, but must be str or Tuple[str, float] for ModelTypes.SCORE_CATEGORICAL"
)
def _get_label(
name: str,
value: Union[str, bool, int, float, Tuple[str, float]],
model_type: Optional[ModelTypes],
) -> public__pb2.Label:
if isinstance(value, public__pb2.Label):
return value
value = convert_element(value)
if model_type == ModelTypes.SCORE_CATEGORICAL:
if isinstance(value, tuple):
return _get_score_categorical_label(value)
else:
sc = public__pb2.ScoreCategorical()
sc.category.category = value
return public__pb2.Label(score_categorical=sc)
elif model_type == ModelTypes.BINARY:
return public__pb2.Label(binary=value)
elif model_type == ModelTypes.NUMERIC:
return public__pb2.Label(numeric=value)
elif model_type == ModelTypes.CATEGORICAL:
return public__pb2.Label(categorical=value)
raise TypeError(
f"{name}_label = {value} of type {type(value)}. Must be one of str, bool, float, int, or Tuple[str, float]"
)
def _get_score_categorical_label(value):
sc = public__pb2.ScoreCategorical()
if value[1] is not None:
sc.score_category.category = value[0]
sc.score_category.score = value[1]
else:
sc.category.category = value[0]
return public__pb2.Label(score_categorical=sc)
def _validate_bulk_prediction(
model_version,
prediction_labels,
prediction_ids,
features,
feature_names_overwrite,
prediction_timestamps,
):
    if not isinstance(model_version, str):
        raise TypeError(
            f"model_version {model_version} is type {type(model_version)}, but must be a str"
        )
    # Validate prediction labels type and shape and that column length is the same as prediction ids
    if not isinstance(prediction_labels, (pd.DataFrame, pd.Series)):
        raise TypeError(
            f"prediction_labels is type {type(prediction_labels)}, but expects one of: pd.DataFrame, pd.Series"
        )
    # Null check runs after the type check so non-pandas inputs raise the intended TypeError
    if prediction_labels.isna().values.any():
        raise ValueError("prediction labels cannot contain null value")
if isinstance(prediction_labels, pd.DataFrame) and not (
prediction_labels.shape[1] == 1 or prediction_labels.shape[1] == 2
):
raise ValueError(
f"prediction_labels contains {prediction_labels.shape[1]} columns, but can only have 1 or 2"
)
if isinstance(prediction_labels, pd.DataFrame) and prediction_labels.shape[1] == 2:
if not pd.api.types.is_string_dtype(
prediction_labels[prediction_labels.columns[0]]
):
raise TypeError(
f"Two column prediction_labels must have strings for column 0."
)
if not pd.api.types.is_numeric_dtype(
prediction_labels[prediction_labels.columns[1]]
):
raise TypeError(
f"Two column prediction_labels must have numerics for column 1."
)
if prediction_labels.shape[0] != prediction_ids.shape[0]:
raise ValueError(
f"prediction_labels contains {prediction_labels.shape[0]} elements, but must have the same as "
f"predictions_ids: {prediction_ids.shape[0]}. "
)
# Validate features type, shape matches prediction ids, and handle feature names overwrite
if features is not None:
if not isinstance(features, pd.DataFrame):
raise TypeError(
f"features is type {type(features)}, but expect type pd.DataFrame."
)
if features.shape[0] != prediction_ids.shape[0]:
raise ValueError(
f"features has {features.shape[0]} sets of features, but must match size of predictions_ids: "
f"{prediction_ids.shape[0]}. "
)
if feature_names_overwrite is not None:
if len(features.columns) != len(feature_names_overwrite):
raise ValueError(
f"feature_names_overwrite has len:{len(feature_names_overwrite)}, but expects the same "
f"number of columns in features dataframe: {len(features.columns)}. "
)
else:
for name in features.columns:
                if not isinstance(name, (str, int, float)):
                    raise TypeError(
                        f"features.column {name} is type {type(name)}, but expected one of: str, int, float"
                    )
# Validate timestamp overwrite
validate_prediction_timestamps(prediction_ids, prediction_timestamps)
class Client:
"""
Arize API Client to report model predictions and actuals to Arize AI platform
"""
def __init__(
self,
api_key: str,
organization_key: str,
uri="https://api.arize.com/v1",
max_workers=8,
max_queue_bound=5000,
retry_attempts=3,
timeout=200,
):
"""
        :param api_key: (str) api key associated with your account with Arize AI
        :param organization_key: (str) organization key in Arize AI
        :param max_workers: (int) number of max concurrent requests to Arize. Default: 8
        :param max_queue_bound: (int) number of maximum concurrent future objects being generated for publishing to Arize. Default: 5000
"""
if not isinstance(organization_key, str):
raise TypeError(
f"organization_key {organization_key} is type {type(organization_key)}, but must be a str"
)
self._retry_attempts = retry_attempts
self._uri = uri + "/log"
self._bulk_url = uri + "/bulk"
self._stream_uri = uri + "/preprod"
self._files_uri = uri + "/files"
self._api_key = api_key
self._organization_key = organization_key
self._timeout = timeout
self._session = FuturesSession(
executor=BoundedExecutor(max_queue_bound, max_workers)
)
# Grpc-Metadata prefix is required to pass non-standard md through via grpc-gateway
self._header = {
"authorization": api_key,
"Grpc-Metadata-organization": organization_key,
"Grpc-Metadata-sdk-version": __version__,
"Grpc-Metadata-sdk": "py",
}
def log(
self,
model_id: str,
prediction_id: Union[str, int, float],
model_version: str = None,
prediction_label: Union[str, bool, int, float, Tuple[str, float]] = None,
actual_label: Union[str, bool, int, float, Tuple[str, float]] = None,
shap_values: Dict[str, float] = None,
features: Optional[Dict[Union[str, int, float], Union[str, bool, float, int]]] = None,
model_type: Optional[ModelTypes] = None,
prediction_timestamp: Optional[int] = None,
) -> cf.Future:
"""Logs a record to Arize via a POST request. Returns :class:`Future` object.
:param model_id: (str) Unique identifier for a given model
        :param prediction_id: (str, int, float) Unique string identifier for a specific prediction. This value is used to match a prediction to an actual label or feature importances in the Arize platform.
:param model_version: (str) Field used to group together a subset of predictions and actuals for a given model_id.
:param prediction_label: (one of str, bool, int, float, Tuple[str, float]) The predicted value for a given model input.
:param actual_label: (one of str, bool, int, float) The actual true value for a given model input. This actual will be matched to the prediction with the same prediction_id as the one in this call.
:param shap_values: (str, float) Dictionary containing human readable and debuggable model features keys, along with SHAP feature importance values. Keys must be str, while values must be float.
        :param features: ((str, int, float), <value>) Optional dictionary containing human readable and debuggable model features. Keys must be one of str, int, float; values one of str, bool, float, int.
:param model_type: (ModelTypes) Declares what model type this prediction is for. Binary, Numeric, Categorical, Score_Categorical.
:param prediction_timestamp: (int) Optional field with unix epoch time in seconds to overwrite timestamp for prediction. If None, prediction uses current timestamp.
:rtype : concurrent.futures.Future
"""
# Validate model_id
if not isinstance(model_id, str):
raise TypeError(
f"model_id {model_id} is type {type(model_id)}, but must be a str"
)
# Validate feature types
if features is not None and bool(features):
for k, v in features.items():
val = convert_element(v)
if val is not None and not isinstance(val, (str, bool, float, int)):
raise TypeError(
f"feature {k} with value {v} is type {type(v)}, but expected one of: str, bool, float, int"
)
# Check the timestamp present on the event
if prediction_timestamp is not None and not isinstance(
prediction_timestamp, int
):
raise TypeError(
f"prediction_timestamp {prediction_timestamp} is type {type(prediction_timestamp)} but expected int"
)
now = int(time.time())
if prediction_timestamp is not None and not is_timestamp_in_range(
now, prediction_timestamp
):
raise ValueError(
f"prediction_timestamp: {prediction_timestamp} is out of range. Value must be within 1 year of the current time."
)
# Construct the prediction
p = None
if prediction_label is not None:
if not isinstance(model_version, str):
raise TypeError(
f"model_version {model_version} is type {type(model_version)}, but must be a str"
)
model_type = (
infer_model_type(prediction_label) if model_type is None else model_type
)
_label_validation(model_type, label=convert_element(prediction_label))
p = public__pb2.Prediction(
label=_get_label(
value=prediction_label,
name="prediction",
model_type=model_type,
),
model_version=model_version,
)
if features is not None:
converted_feats = {}
for (k, v) in features.items():
val = get_value_object(value=v, name=k)
if val is not None:
converted_feats[str(k)] = val
feats = public__pb2.Prediction(features=converted_feats)
p.MergeFrom(feats)
if prediction_timestamp is not None:
p.timestamp.MergeFrom(get_timestamp(prediction_timestamp))
# Validate and construct the optional actual
a = None
if actual_label is not None:
model_type = (
infer_model_type(actual_label) if model_type is None else model_type
)
_label_validation(model_type, label=convert_element(actual_label))
a = public__pb2.Actual(
label=_get_label(
value=actual_label, name="actual", model_type=model_type
)
)
# Validate and construct the optional feature importances
fi = None
if shap_values is not None and bool(shap_values):
for k, v in shap_values.items():
if not isinstance(convert_element(v), float):
raise TypeError(
f"feature {k} with value {v} is type {type(v)}, but expected one of: float"
)
fi = public__pb2.FeatureImportances(feature_importances=shap_values)
if p is None and a is None and fi is None:
raise ValueError(
f"must provide at least one of prediction_label, actual_label, or shap_values"
)
rec = public__pb2.Record(
organization_key=self._organization_key,
model_id=model_id,
prediction_id=str(prediction_id),
prediction=p,
actual=a,
feature_importances=fi,
)
return self._post(record=rec, uri=self._uri, indexes=None)
def bulk_log(
self,
model_id: str,
prediction_ids: Union[pd.DataFrame, pd.Series],
model_version: str = None,
prediction_labels: Union[
pd.DataFrame, pd.Series
] = None, # 1xN or 2xN (for scored categorical)
features: Optional[Union[pd.DataFrame, pd.Series]] = None,
actual_labels: Union[pd.DataFrame, pd.Series] = None,
shap_values: Union[pd.DataFrame, pd.Series] = None,
model_type: Optional[ModelTypes] = None,
feature_names_overwrite: Optional[List[str]] = None,
prediction_timestamps: Optional[Union[List[int], pd.Series]] = None,
) -> List[cf.Future]:
"""Logs a collection of predictions with Arize via a POST request. Returns list<:class:`Future`> object.
:param model_id: (str) Unique identifier for a given model
:param model_version: (str) Field used to group together a subset of predictions and actuals for a given model_id.
        :param prediction_ids: Pandas DataFrame with shape (N, 1) or Series with str valued elements. Each element corresponds to a unique string identifier for a specific prediction. These values are needed to match latent actual labels to their original prediction labels. Each element corresponds to feature values of the same index.
        :param prediction_labels: Optional Pandas DataFrame with shape (N, 1) or (N, 2) or Series. The predicted values for a given model input. Values are associated with the ids in the same index. For a (N, 2) DataFrame, column 0 is interpreted as the prediction category and column 1 is interpreted as the prediction score.
:param features: Optional Pandas DataFrame with shape (N, 2) containing human readable and debuggable model features. DataFrames columns (df.columns) should contain feature names and must have same number of rows as prediction_ids and prediction_labels. N.B. np.nan values are stripped from the record and manifest on our platform as a missing value (not 0.0 or NaN)
        :param actual_labels: Optional Pandas DataFrame with shape (N, 1) or Series. The actual true values for a given model input. Values are associated with the labels in the same index.
        :param shap_values: Optional Pandas DataFrame with shape (N, 1) or Series. The SHAP value sets for a set of predictions. SHAP value sets correspond to the prediction ids with the same index.
:param model_type: (ModelTypes) Declares what model type this prediction is for. Binary, Numeric, Categorical, Score_Categorical.
:param feature_names_overwrite: Optional list<str> that if present will overwrite features.columns values. Must contain the same number of elements as features.columns.
:param prediction_timestamps: (list<int>) Optional list with same number of elements as prediction_labels field with unix epoch time in seconds to overwrite timestamp for each prediction. If None, prediction uses current timestamp.
:rtype : list<concurrent.futures.Future>
"""
# Validate model_id
if not isinstance(model_id, str):
raise TypeError(
f"model_id {model_id} is type {type(model_id)}, but must be a str"
)
# Validate prediction_ids
if not isinstance(prediction_ids, (pd.DataFrame, pd.Series)):
raise TypeError(
f"prediction_ids is type {type(prediction_ids)}, but expect one of: pd.DataFrame, pd.Series"
)
if prediction_labels is not None:
_validate_bulk_prediction(
model_version,
prediction_labels,
prediction_ids,
features,
feature_names_overwrite,
prediction_timestamps,
)
model_type = (
infer_model_type(prediction_labels.iloc[0])
if model_type is None
else model_type
)
if actual_labels is not None:
if not isinstance(actual_labels, (pd.DataFrame, pd.Series)):
raise TypeError(
f"actual_labels is type: {type(actual_labels)}, but expects one of: pd.DataFrame, pd.Series"
)
if actual_labels.shape[0] != prediction_ids.shape[0]:
raise ValueError(
f"actual_labels contains {actual_labels.shape[0]} elements, but must have the same as "
f"predictions_ids: {prediction_ids.shape[0]}. "
)
# Set model type if not yet set
model_type = (
infer_model_type(actual_labels.iloc[0])
if model_type is None
else model_type
)
if shap_values is not None:
if not isinstance(shap_values, pd.DataFrame):
raise TypeError(
f"shap_values is type {type(shap_values)}, but expect type pd.DataFrame."
)
if shap_values.shape[0] != prediction_ids.shape[0]:
raise ValueError(
f"shap_values has {shap_values.shape[0]} sets of values, but must match size of "
f"predictions_ids: {shap_values.shape[0]}. "
)
if isinstance(shap_values.columns, pd.core.indexes.numeric.NumericIndex):
raise TypeError(
f"shap_values.columns is of type {type(shap_values.columns)}, but expect elements to be str."
)
for name in shap_values.columns:
if not isinstance(name, str):
raise TypeError(
f"shap_values.column {name} is type {type(name)}, but expect str"
)
prediction_ids = prediction_ids.to_numpy()
prediction_labels = (
prediction_labels.to_numpy() if prediction_labels is not None else None
)
prediction_timestamps = (
prediction_timestamps.tolist()
if isinstance(prediction_timestamps, pd.Series)
else prediction_timestamps
)
if features is not None:
feature_names = feature_names_overwrite or features.columns
features = features.to_numpy()
actual_labels = actual_labels.to_numpy() if actual_labels is not None else None
shap_columns = shap_values.columns if shap_values is not None else None
shap_values = shap_values.to_numpy() if shap_values is not None else None
records = []
for row, v in enumerate(prediction_ids):
            pred_id = v if isinstance(v, (str, int, float)) else v[0]
p = None
if prediction_labels is not None:
# if there is more than 1 dimension, and the second dimension size is 2 - TODO instead just guarantee shape is always (X,Y) instead of sometimes (X,)
if (
len(prediction_labels.shape) == 2
and prediction_labels.shape[1] == 2
):
label = _get_label(
value=(prediction_labels[row][0], prediction_labels[row][1]),
name="prediction",
model_type=model_type,
)
else:
label = _get_label(
value=prediction_labels[row],
name="prediction",
model_type=model_type,
)
p = public__pb2.Prediction(label=label)
if features is not None:
converted_feats = {}
for column, name in enumerate(feature_names):
val = get_value_object(value=features[row][column], name=name)
if val is not None:
converted_feats[str(name)] = val
feats = public__pb2.Prediction(features=converted_feats)
p.MergeFrom(feats)
if prediction_timestamps is not None:
p.timestamp.MergeFrom(get_timestamp(prediction_timestamps[row]))
a = None
if actual_labels is not None:
a = public__pb2.Actual(
label=_get_label(
value=actual_labels[row], name="actual", model_type=model_type
)
)
fi = None
if shap_values is not None:
converted_fi = {
name: shap_values[row][column]
for column, name in enumerate(shap_columns)
}
fi = public__pb2.FeatureImportances(feature_importances=converted_fi)
rec = public__pb2.Record(
prediction_id=str(pred_id),
prediction=p,
actual=a,
feature_importances=fi,
)
records.append(rec)
brs = get_bulk_records(
self._organization_key, model_id, model_version, bundle_records(records)
)
return self._post_bulk(records=brs, uri=self._bulk_url)
def log_validation_records(
self,
model_id: str,
model_version: str,
batch_id: str,
prediction_labels: Union[pd.DataFrame, pd.Series],
actual_labels: Union[pd.DataFrame, pd.Series],
prediction_scores: Optional[Union[pd.DataFrame, pd.Series]] = None,
prediction_ids: Optional[Union[pd.DataFrame, pd.Series]] = None,
model_type: Optional[ModelTypes] = None,
features: Optional[Union[pd.DataFrame, pd.Series]] = None,
prediction_timestamps: Optional[Union[List[int], pd.Series]] = None,
) -> List[cf.Future]:
"""Logs a set of validation records to Arize. Returns :class:`Future` object.
:param model_id: (str) Unique identifier for a given model.
:param model_type: (ModelTypes) Declares what model type these records are for. Binary, Numeric, Categorical, Score_Categorical.
:param model_version: (str) Unique identifier used to group together a subset of records for a given model_id.
:param batch_id: (str) Unique identifier used to group together a subset of validation records for a given model_id and model_version - akin to a validation set.
:param prediction_labels: 1-D Pandas DataFrame or Series. The predicted values for a given model input.
:param actual_labels: 1-D Pandas DataFrame or Series. The actual true values for a given model input.
        :param prediction_scores: 1-D Pandas DataFrame or Series. The predicted scores for the corresponding predicted_label of a classification model. If present, elements in prediction_labels must be of type str. Values are associated with the labels in the same index.
        :param prediction_ids: 1-D Pandas DataFrame or Series. The prediction IDs for the corresponding predicted_label of a classification model. If present, elements in prediction_labels must be of type str. Values are associated with the labels in the same index.
:param features: Optional 2-D Pandas DataFrame containing human readable and debuggable model features. DataFrames columns (df.columns) should contain feature names and must have same number of rows as actual_labels and prediction_labels. N.B. np.nan values are stripped from the record and manifest on our platform as a missing value (not 0.0 or NaN)
:param prediction_timestamps: (list<int>) Optional list with same number of elements as prediction_labels field with unix epoch time in seconds to overwrite timestamp for each prediction. If None, prediction uses current timestamp.
:rtype : list<concurrent.futures.Future>
"""
rec = ValidationRecords(
organization_key=self._organization_key,
model_id=model_id,
model_type=model_type,
model_version=model_version,
batch_id=batch_id,
features=features,
prediction_labels=prediction_labels,
actual_labels=actual_labels,
prediction_scores=prediction_scores,
prediction_ids=prediction_ids,
prediction_timestamps=prediction_timestamps,
)
rec.validate_inputs()
return self._post_preprod(records=rec.build_proto())
def log_training_records(
self,
model_id: str,
model_version: str,
prediction_labels: Union[pd.DataFrame, pd.Series],
actual_labels: Union[pd.DataFrame, pd.Series],
prediction_scores: Optional[Union[pd.DataFrame, pd.Series]] = None,
model_type: Optional[ModelTypes] = None,
features: Optional[Union[pd.DataFrame, pd.Series]] = None,
) -> List[cf.Future]:
"""Logs a stream of training records to Arize. Returns :class:`Future` object.
:param model_id: (str) Unique identifier for a given model.
:param model_version: (str) Unique identifier used to group together a subset of records for a given model_id.
:param model_type: (ModelTypes) Declares what model type these records are for. Binary, Numeric, Categorical, Score_Categorical.
:param prediction_labels: 1-D Pandas DataFrame or Series. The predicted values for a given model input.
:param actual_labels: 1-D Pandas DataFrame or Series. The actual true values for a given model input.
        :param prediction_scores: 1-D Pandas DataFrame or Series. The predicted scores for the corresponding predicted_label of a classification model. If present, elements in prediction_labels must be of type str. Values are associated with the labels in the same index.
:param features: Optional 2-D Pandas DataFrame containing human readable and debuggable model features. DataFrames columns (df.columns) should contain feature names and must have same number of rows as actual_labels and prediction_labels. N.B. np.nan values are stripped from the record and manifest on our platform as a missing value (not 0.0 or NaN)
:rtype : list<concurrent.futures.Future>
"""
rec = TrainingRecords(
organization_key=self._organization_key,
model_id=model_id,
model_type=model_type,
model_version=model_version,
features=features,
prediction_labels=prediction_labels,
prediction_scores=prediction_scores,
actual_labels=actual_labels,
)
rec.validate_inputs()
return self._post_preprod(records=rec.build_proto())
# Deprecated
def log_prediction(
self,
model_id: str,
model_version: str,
prediction_id: str,
prediction_label: Union[str, bool, int, float],
prediction_score: Optional[float] = None,
features: Optional[Dict[str, Union[str, bool, float, int]]] = None,
model_type: Optional[ModelTypes] = None,
time_overwrite: Optional[int] = None,
) -> cf.Future:
"""Logs a prediction to Arize via a POST request. Returns :class:`Future` object.
:param model_id: (str) Unique identifier for a given model
:param model_version: (str) Field used to group together a subset of predictions and actuals for a given model_id.
:param prediction_id: (str) Unique string identifier for a specific prediction. This value is used to match a prediction to an actual label in the Arize platform.
:param prediction_label: (one of bool, str, float, int) The predicted value for a given model input.
        :param prediction_score: (float) Optional predicted score for the predicted_label of a classification model. If present, the prediction_label must be of type str.
        :param features: (str, <value>) Optional dictionary containing human readable and debuggable model features. Keys must be str, values one of str, bool, float, int.
:param model_type: (ModelTypes) Declares what model type this prediction is for. Binary, Numeric, Categorical, Score_Categorical.
:param time_overwrite: (int) Optional field with unix epoch time in seconds to overwrite timestamp for prediction. If None, prediction uses current timestamp.
:rtype : concurrent.futures.Future
"""
return self.log(
model_id=model_id,
prediction_id=prediction_id,
model_version=model_version,
prediction_label=prediction_label
if prediction_score is None
else (prediction_label, prediction_score),
features=features,
model_type=model_type,
prediction_timestamp=time_overwrite,
)
# Deprecated
def log_actual(
self,
model_id: str,
prediction_id: str,
actual_label: Union[str, bool, int, float],
model_type: Optional[ModelTypes] = None,
) -> cf.Future:
"""Logs an actual to Arize via a POST request. Returns :class:`Future` object.
:param model_id: (str) Unique identifier for a given model
:param prediction_id: (str) Unique string identifier for a specific prediction. This value is used to match a prediction to an actual label in the Arize platform.
:param actual_label: (one of bool, str, float, int) The actual true value for a given model input. This actual will be matched to the prediction with the same prediction_id as the one in this call.
:param model_type: (ModelTypes) Declares what model type this prediction is for. Binary, Numeric, Categorical, Score_Categorical.
:rtype : concurrent.futures.Future
"""
return self.log(
model_id=model_id,
model_type=model_type,
prediction_id=prediction_id,
actual_label=actual_label,
)
# Deprecated
def log_shap_values(
self,
model_id: str,
prediction_id: str,
shap_values: Dict[str, float],
) -> cf.Future:
"""Logs SHAP feature importance values for a given prediction to Arize via a POST request. Returns :class:`Future` object.
:param model_id: (str) Unique identifier for a given model.
:param prediction_id: (str) Unique string identifier for a specific prediction. This value is used to match a prediction to the SHAP values supplied in this request in the Arize platform.
:param shap_values: (str, float) Dictionary containing human readable and debuggable model features keys, along with SHAP feature importance values. Keys must be str, while values must be float.
:rtype : concurrent.futures.Future
"""
return self.log(
prediction_id=prediction_id,
model_id=model_id,
shap_values=shap_values,
)
# Deprecated
def log_bulk_shap_values(
self,
model_id: str,
prediction_ids: Union[pd.DataFrame, pd.Series],
shap_values: Union[pd.DataFrame, pd.Series],
) -> List[cf.Future]:
"""Logs a collection of SHAP feature importance value sets with Arize via a POST request. Returns list<:class:`Future`> object.
:param model_id: (str) Unique identifier for a given model
        :param prediction_ids: 1-D Pandas DataFrame or Series with string elements. Each element corresponds to a unique string identifier for a specific prediction. Each element corresponds to the SHAP values of the same index.
        :param shap_values: 1-D Pandas DataFrame or Series. The SHAP value sets for a set of predictions. SHAP value sets correspond to the prediction ids with the same index.
:rtype : list<concurrent.futures.Future>
"""
return self.bulk_log(
model_id=model_id, prediction_ids=prediction_ids, shap_values=shap_values
)
# Deprecated
def log_bulk_predictions(
self,
model_id: str,
model_version: str,
prediction_ids: Union[pd.DataFrame, pd.Series],
prediction_labels: Union[pd.DataFrame, pd.Series],
prediction_scores: Optional[Union[pd.DataFrame, pd.Series]] = None,
features: Optional[Union[pd.DataFrame, pd.Series]] = None,
model_type: Optional[ModelTypes] = None,
feature_names_overwrite: Optional[List[str]] = None,
time_overwrite: Optional[List[int]] = None,
) -> List[cf.Future]:
"""Logs a collection of predictions with Arize via a POST request. Returns list<:class:`Future`> object.
:param model_id: (str) Unique identifier for a given model
:param model_type: (ModelTypes) Declares what model type this prediction is for. Binary, Numeric, Categorical, Score_Categorical.
:param model_version: (str) Field used to group together a subset of predictions and actuals for a given model_id.
        :param prediction_ids: 1-D Pandas DataFrame or Series with string elements. Each element corresponds to a unique string identifier for a specific prediction. These values are needed to match latent actual labels to their original prediction labels. Each element corresponds to feature values of the same index.
        :param prediction_labels: 1-D Pandas DataFrame or Series. The predicted values for a given model input. Values are associated with the ids in the same index.
        :param prediction_scores: 1-D Pandas DataFrame or Series. The predicted scores for the corresponding predicted_label of a classification model. If present, elements in prediction_labels must be of type str. Values are associated with the labels in the same index.
:param features: Optional 2-D Pandas DataFrame containing human readable and debuggable model features. DataFrames columns (df.columns) should contain feature names and must have same number of rows as prediction_ids and prediction_labels. N.B. np.nan values are stripped from the record and manifest on our platform as a missing value (not 0.0 or NaN)
:param feature_names_overwrite: Optional list<str> that if present will overwrite features.columns values. Must contain the same number of elements as features.columns.
:param time_overwrite: (list<int>) Optional list with same number of elements as prediction_labels field with unix epoch time in seconds to overwrite timestamp for each prediction. If None, prediction uses current timestamp.
:rtype : list<concurrent.futures.Future>
"""
return self.bulk_log(
model_id=model_id,
model_type=model_type,
model_version=model_version,
prediction_ids=prediction_ids,
prediction_labels=pd.concat([prediction_labels, prediction_scores], axis=1),
features=features,
feature_names_overwrite=feature_names_overwrite,
prediction_timestamps=time_overwrite,
)
# Deprecated
def log_bulk_actuals(
self,
model_id: str,
prediction_ids: Union[pd.DataFrame, pd.Series],
actual_labels: Union[pd.DataFrame, pd.Series],
model_type: Optional[ModelTypes] = None,
) -> List[cf.Future]:
"""Logs a collection of actuals with Arize via a POST request. Returns list<:class:`Future`> object.
:param model_id: (str) Unique identifier for a given model
:param model_type: (ModelTypes) Declares what model type this prediction is for. Binary, Numeric, Categorical, Score_Categorical.
        :param prediction_ids: 1-D Pandas DataFrame or Series with string elements. Each element corresponds to a unique string identifier for a specific prediction. These values are needed to match latent actual labels to their original prediction labels. Each element corresponds to feature values of the same index.
        :param actual_labels: 1-D Pandas DataFrame or Series. The actual true values for a given model input. Values are associated with the labels in the same index.
:rtype : list<concurrent.futures.Future>
"""
return self.bulk_log(
model_id=model_id,
model_type=model_type,
prediction_ids=prediction_ids,
actual_labels=actual_labels,
)
def _post_bulk(self, records, uri):
return [self._post(r, uri, k) for k, r in records.items()]
def _post(self, record, uri, indexes):
resp = self._session.post(
uri,
headers=self._header,
timeout=self._timeout,
json=MessageToDict(message=record, preserving_proto_field_name=True),
)
if indexes is not None and len(indexes) == 2:
resp.starting_index = indexes[0]
resp.ending_index = indexes[1]
return resp
def _post_preprod(self, records):
futures = []
for k, recs in records.items():
futures.append(
self._session.post(
self._stream_uri,
headers=self._header,
timeout=self._timeout,
data="\n".join(
json.dumps(
MessageToDict(message=d, preserving_proto_field_name=True)
)
for d in recs
),
)
)
return futures
|
py | b41089cdfde0b0e47ab7b7b64e1f689311761999 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
from wim import __version__
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with io.open('README.rst') as f:
readme = f.read()
with io.open('HISTORY.rst') as f:
history = f.read().replace('.. :changelog:', '')
with io.open('requirements.txt', encoding='utf-8') as f:
requirements = f.read().splitlines()
setup(
name='wim',
version=__version__,
description='wim is a command line tool to create Web images.',
long_description=readme + '\n\n' + history,
author='Ramiro Gómez',
author_email='[email protected]',
url='https://github.com/yaph/wim',
packages=['wim',],
package_dir={'wim': 'wim'},
include_package_data=True,
install_requires=requirements,
license='MIT',
zip_safe=False,
keywords='wim',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
entry_points={
'console_scripts': [
'wim = wim.wim:main'
]
}
)
|
py | b41089e3e64bce3e09d1bfbf8b69eb9c47be3aa9 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'morning_cake_30028.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | b41089fb58cbab41da02a10987127bf559d4f7ca | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : [email protected]
###################################################################
"""
MDivider
"""
import six
from dayu_widgets.label import MLabel
from dayu_widgets.qt import QWidget, Qt, QFrame, QHBoxLayout, Property
class MDivider(QWidget):
'''
A divider line separates different content.
Property:
dayu_text: six.string_types
'''
_alignment_map = {
Qt.AlignCenter: 50,
Qt.AlignLeft: 20,
Qt.AlignRight: 80,
}
def __init__(self, text='', orientation=Qt.Horizontal, alignment=Qt.AlignCenter, parent=None):
super(MDivider, self).__init__(parent)
self._orient = orientation
self._text_label = MLabel().secondary()
self._left_frame = QFrame()
self._right_frame = QFrame()
self._main_lay = QHBoxLayout()
self._main_lay.setContentsMargins(0, 0, 0, 0)
self._main_lay.setSpacing(0)
self._main_lay.addWidget(self._left_frame)
self._main_lay.addWidget(self._text_label)
self._main_lay.addWidget(self._right_frame)
self.setLayout(self._main_lay)
if orientation == Qt.Horizontal:
self._left_frame.setFrameShape(QFrame.HLine)
self._left_frame.setFrameShadow(QFrame.Sunken)
self._right_frame.setFrameShape(QFrame.HLine)
self._right_frame.setFrameShadow(QFrame.Sunken)
else:
self._text_label.setVisible(False)
self._right_frame.setVisible(False)
self._left_frame.setFrameShape(QFrame.VLine)
self._left_frame.setFrameShadow(QFrame.Plain)
self.setFixedWidth(2)
self._main_lay.setStretchFactor(self._left_frame,
self._alignment_map.get(alignment, 50))
self._main_lay.setStretchFactor(self._right_frame,
100 - self._alignment_map.get(alignment, 50))
self._text = None
self.set_dayu_text(text)
def set_dayu_text(self, value):
"""
Set the divider's text.
        When the text is empty, hide the text_label and right_frame so the divider does not have a gap.
:param value: six.string_types
:return: None
"""
self._text = value
self._text_label.setText(value)
if self._orient == Qt.Horizontal:
self._text_label.setVisible(bool(value))
self._right_frame.setVisible(bool(value))
def get_dayu_text(self):
"""
Get current text
:return: six.string_types
"""
return self._text
dayu_text = Property(six.string_types[0], get_dayu_text, set_dayu_text)
@classmethod
def left(cls, text=''):
"""Create a horizontal divider with text at left."""
return cls(text, alignment=Qt.AlignLeft)
@classmethod
def right(cls, text=''):
"""Create a horizontal divider with text at right."""
return cls(text, alignment=Qt.AlignRight)
@classmethod
def center(cls, text=''):
"""Create a horizontal divider with text at center."""
return cls(text, alignment=Qt.AlignCenter)
@classmethod
def vertical(cls):
"""Create a vertical divider"""
return cls(orientation=Qt.Vertical)
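# Hedged usage sketch (layout variable names are illustrative): the classmethod
# constructors above are the typical entry points when composing a form:
#   layout.addWidget(MDivider.left('Settings'))
#   layout.addWidget(MDivider())            # plain horizontal rule
#   row_layout.addWidget(MDivider.vertical())  # thin vertical separator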
|
py | b4108b0c7ac19c9171c7059809aeeaafaf435b07 | # vfs.py - Mercurial 'vfs' classes
#
# Copyright Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import contextlib
import errno
import os
import shutil
import stat
import tempfile
import threading
from .i18n import _
from . import (
error,
pathutil,
pycompat,
util,
)
def _avoidambig(path, oldstat):
"""Avoid file stat ambiguity forcibly
    This function forces a copy of the ``path`` file if it is owned by
    another user (see issue5418 and issue5584 for details).
"""
def checkandavoid():
newstat = util.filestat.frompath(path)
# return whether file stat ambiguity is (already) avoided
return (not newstat.isambig(oldstat) or
newstat.avoidambig(path, oldstat))
if not checkandavoid():
# simply copy to change owner of path to get privilege to
# advance mtime (see issue5418)
util.rename(util.mktempcopy(path), path)
checkandavoid()
class abstractvfs(object):
"""Abstract base class; cannot be instantiated"""
def __init__(self, *args, **kwargs):
'''Prevent instantiation; don't call this from subclasses.'''
raise NotImplementedError('attempted instantiating ' + str(type(self)))
def tryread(self, path):
'''gracefully return an empty string for missing files'''
try:
return self.read(path)
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
return ""
def tryreadlines(self, path, mode='rb'):
'''gracefully return an empty array for missing files'''
try:
return self.readlines(path, mode=mode)
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
return []
@util.propertycache
def open(self):
'''Open ``path`` file, which is relative to vfs root.
Newly created directories are marked as "not to be indexed by
the content indexing service", if ``notindexed`` is specified
for "write" mode access.
'''
return self.__call__
def read(self, path):
with self(path, 'rb') as fp:
return fp.read()
def readlines(self, path, mode='rb'):
with self(path, mode=mode) as fp:
return fp.readlines()
def write(self, path, data, backgroundclose=False):
with self(path, 'wb', backgroundclose=backgroundclose) as fp:
return fp.write(data)
def writelines(self, path, data, mode='wb', notindexed=False):
with self(path, mode=mode, notindexed=notindexed) as fp:
return fp.writelines(data)
def append(self, path, data):
with self(path, 'ab') as fp:
return fp.write(data)
def basename(self, path):
"""return base element of a path (as os.path.basename would do)
This exists to allow handling of strange encoding if needed."""
return os.path.basename(path)
def chmod(self, path, mode):
return os.chmod(self.join(path), mode)
def dirname(self, path):
"""return dirname element of a path (as os.path.dirname would do)
This exists to allow handling of strange encoding if needed."""
return os.path.dirname(path)
def exists(self, path=None):
return os.path.exists(self.join(path))
def fstat(self, fp):
return util.fstat(fp)
def isdir(self, path=None):
return os.path.isdir(self.join(path))
def isfile(self, path=None):
return os.path.isfile(self.join(path))
def islink(self, path=None):
return os.path.islink(self.join(path))
def isfileorlink(self, path=None):
'''return whether path is a regular file or a symlink
Unlike isfile, this doesn't follow symlinks.'''
try:
st = self.lstat(path)
except OSError:
return False
mode = st.st_mode
return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
def reljoin(self, *paths):
"""join various elements of a path together (as os.path.join would do)
        The vfs base is not injected so that paths stay relative. This exists
to allow handling of strange encoding if needed."""
return os.path.join(*paths)
def split(self, path):
"""split top-most element of a path (as os.path.split would do)
This exists to allow handling of strange encoding if needed."""
return os.path.split(path)
def lexists(self, path=None):
return os.path.lexists(self.join(path))
def lstat(self, path=None):
return os.lstat(self.join(path))
def listdir(self, path=None):
return os.listdir(self.join(path))
def makedir(self, path=None, notindexed=True):
return util.makedir(self.join(path), notindexed)
def makedirs(self, path=None, mode=None):
return util.makedirs(self.join(path), mode)
def makelock(self, info, path):
return util.makelock(info, self.join(path))
def mkdir(self, path=None):
return os.mkdir(self.join(path))
def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=self.join(dir), text=text)
dname, fname = util.split(name)
if dir:
return fd, os.path.join(dir, fname)
else:
return fd, fname
def readdir(self, path=None, stat=None, skip=None):
return util.listdir(self.join(path), stat, skip)
def readlock(self, path):
return util.readlock(self.join(path))
def rename(self, src, dst, checkambig=False):
"""Rename from src to dst
checkambig argument is used with util.filestat, and is useful
only if destination file is guarded by any lock
(e.g. repo.lock or repo.wlock).
To avoid file stat ambiguity forcibly, checkambig=True involves
copying ``src`` file, if it is owned by another. Therefore, use
checkambig=True only in limited cases (see also issue5418 and
issue5584 for detail).
"""
srcpath = self.join(src)
dstpath = self.join(dst)
oldstat = checkambig and util.filestat.frompath(dstpath)
if oldstat and oldstat.stat:
ret = util.rename(srcpath, dstpath)
_avoidambig(dstpath, oldstat)
return ret
return util.rename(srcpath, dstpath)
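    # Hedged usage sketch (file names are illustrative): callers holding
    # repo.lock/wlock typically rename a freshly written temp file over the
    # guarded target with checkambig=True so mtime-based caches stay valid:
    #   vfs.rename('journal.new', 'journal', checkambig=True)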
def readlink(self, path):
return os.readlink(self.join(path))
def removedirs(self, path=None):
"""Remove a leaf directory and all empty intermediate ones
"""
return util.removedirs(self.join(path))
def rmtree(self, path=None, ignore_errors=False, forcibly=False):
"""Remove a directory tree recursively
If ``forcibly``, this tries to remove READ-ONLY files, too.
"""
if forcibly:
def onerror(function, path, excinfo):
if function is not os.remove:
raise
# read-only files cannot be unlinked under Windows
s = os.stat(path)
if (s.st_mode & stat.S_IWRITE) != 0:
raise
os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
os.remove(path)
else:
onerror = None
return shutil.rmtree(self.join(path),
ignore_errors=ignore_errors, onerror=onerror)
def setflags(self, path, l, x):
return util.setflags(self.join(path), l, x)
def stat(self, path=None):
return os.stat(self.join(path))
def unlink(self, path=None):
return util.unlink(self.join(path))
def tryunlink(self, path=None):
"""Attempt to remove a file, ignoring missing file errors."""
util.tryunlink(self.join(path))
def unlinkpath(self, path=None, ignoremissing=False):
return util.unlinkpath(self.join(path), ignoremissing=ignoremissing)
def utime(self, path=None, t=None):
return os.utime(self.join(path), t)
def walk(self, path=None, onerror=None):
"""Yield (dirpath, dirs, files) tuple for each directories under path
``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as the path separator, even if you specify POSIX
style ``path``.
"The root of this vfs" is represented as empty ``dirpath``.
"""
root = os.path.normpath(self.join(None))
# when dirpath == root, dirpath[prefixlen:] becomes empty
# because len(dirpath) < prefixlen.
prefixlen = len(pathutil.normasprefix(root))
for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
yield (dirpath[prefixlen:], dirs, files)
@contextlib.contextmanager
def backgroundclosing(self, ui, expectedcount=-1):
"""Allow files to be closed asynchronously.
When this context manager is active, ``backgroundclose`` can be passed
to ``__call__``/``open`` to result in the file possibly being closed
asynchronously, on a background thread.
"""
# This is an arbitrary restriction and could be changed if we ever
# have a use case.
vfs = getattr(self, 'vfs', self)
if getattr(vfs, '_backgroundfilecloser', None):
raise error.Abort(
_('can only have 1 active background file closer'))
with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
try:
vfs._backgroundfilecloser = bfc
yield bfc
finally:
vfs._backgroundfilecloser = None
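    # Hedged usage sketch (names are illustrative): ``backgroundclosing`` pairs
    # with the ``backgroundclose`` flag accepted by ``__call__``/``open``:
    #   with destvfs.backgroundclosing(ui, expectedcount=nfiles) as bfc:
    #       for name, data in files:
    #           destvfs.write(name, data, backgroundclose=True)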
class vfs(abstractvfs):
'''Operate files relative to a base directory
This class is used to hide the details of COW semantics and
remote file access from higher level code.
'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
(b) the base directory is managed by hg and considered sort-of append-only.
See pathutil.pathauditor() for details.
'''
def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
realpath=False):
if expandpath:
base = util.expandpath(base)
if realpath:
base = os.path.realpath(base)
self.base = base
self._audit = audit
if audit:
self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
else:
self.audit = (lambda path, mode=None: True)
self.createmode = None
self._trustnlink = None
@util.propertycache
def _cansymlink(self):
return util.checklink(self.base)
@util.propertycache
def _chmod(self):
return util.checkexec(self.base)
def _fixfilemode(self, name):
if self.createmode is None or not self._chmod:
return
os.chmod(name, self.createmode & 0o666)
def __call__(self, path, mode="r", text=False, atomictemp=False,
notindexed=False, backgroundclose=False, checkambig=False,
auditpath=True):
'''Open ``path`` file, which is relative to vfs root.
Newly created directories are marked as "not to be indexed by
the content indexing service", if ``notindexed`` is specified
for "write" mode access.
If ``backgroundclose`` is passed, the file may be closed asynchronously.
It can only be used if the ``self.backgroundclosing()`` context manager
is active. This should only be specified if the following criteria hold:
1. There is a potential for writing thousands of files. Unless you
are writing thousands of files, the performance benefits of
           asynchronously closing files are not realized.
2. Files are opened exactly once for the ``backgroundclosing``
active duration and are therefore free of race conditions between
closing a file on a background thread and reopening it. (If the
file were opened multiple times, there could be unflushed data
because the original file handle hasn't been flushed/closed yet.)
        ``checkambig`` argument is passed to atomictempfile (valid
only for writing), and is useful only if target file is
guarded by any lock (e.g. repo.lock or repo.wlock).
To avoid file stat ambiguity forcibly, checkambig=True involves
copying ``path`` file opened in "append" mode (e.g. for
truncation), if it is owned by another. Therefore, use
combination of append mode and checkambig=True only in limited
cases (see also issue5418 and issue5584 for detail).
'''
if auditpath:
if self._audit:
r = util.checkosfilename(path)
if r:
raise error.Abort("%s: %r" % (r, path))
self.audit(path, mode=mode)
f = self.join(path)
if not text and "b" not in mode:
mode += "b" # for that other OS
nlink = -1
if mode not in ('r', 'rb'):
dirname, basename = util.split(f)
# If basename is empty, then the path is malformed because it points
# to a directory. Let the posixfile() call below raise IOError.
if basename:
if atomictemp:
util.makedirs(dirname, self.createmode, notindexed)
return util.atomictempfile(f, mode, self.createmode,
checkambig=checkambig)
try:
if 'w' in mode:
util.unlink(f)
nlink = 0
else:
# nlinks() may behave differently for files on Windows
# shares if the file is open.
with util.posixfile(f):
nlink = util.nlinks(f)
if nlink < 1:
nlink = 2 # force mktempcopy (issue1922)
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
raise
nlink = 0
util.makedirs(dirname, self.createmode, notindexed)
if nlink > 0:
if self._trustnlink is None:
self._trustnlink = nlink > 1 or util.checknlink(f)
if nlink > 1 or not self._trustnlink:
util.rename(util.mktempcopy(f), f)
fp = util.posixfile(f, mode)
if nlink == 0:
self._fixfilemode(f)
if checkambig:
if mode in ('r', 'rb'):
raise error.Abort(_('implementation error: mode %s is not'
' valid for checkambig=True') % mode)
fp = checkambigatclosing(fp)
if backgroundclose:
if not self._backgroundfilecloser:
raise error.Abort(_('backgroundclose can only be used when a '
'backgroundclosing context manager is active')
)
fp = delayclosedfile(fp, self._backgroundfilecloser)
return fp
def symlink(self, src, dst):
self.audit(dst)
linkname = self.join(dst)
util.tryunlink(linkname)
util.makedirs(os.path.dirname(linkname), self.createmode)
if self._cansymlink:
try:
os.symlink(src, linkname)
except OSError as err:
raise OSError(err.errno, _('could not symlink to %r: %s') %
(src, err.strerror), linkname)
else:
self.write(dst, src)
def join(self, path, *insidef):
if path:
return os.path.join(self.base, path, *insidef)
else:
return self.base
opener = vfs
class proxyvfs(object):
def __init__(self, vfs):
self.vfs = vfs
@property
def options(self):
return self.vfs.options
@options.setter
def options(self, value):
self.vfs.options = value
class filtervfs(abstractvfs, proxyvfs):
'''Wrapper vfs for filtering filenames with a function.'''
def __init__(self, vfs, filter):
proxyvfs.__init__(self, vfs)
self._filter = filter
def __call__(self, path, *args, **kwargs):
return self.vfs(self._filter(path), *args, **kwargs)
def join(self, path, *insidef):
if path:
return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
else:
return self.vfs.join(path)
filteropener = filtervfs
class readonlyvfs(abstractvfs, proxyvfs):
'''Wrapper vfs preventing any writing.'''
def __init__(self, vfs):
proxyvfs.__init__(self, vfs)
def __call__(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rb'):
raise error.Abort(_('this vfs is read only'))
return self.vfs(path, mode, *args, **kw)
def join(self, path, *insidef):
return self.vfs.join(path, *insidef)
class closewrapbase(object):
"""Base class of wrapper, which hooks closing
Do not instantiate outside of the vfs layer.
"""
def __init__(self, fh):
object.__setattr__(self, r'_origfh', fh)
def __getattr__(self, attr):
return getattr(self._origfh, attr)
def __setattr__(self, attr, value):
return setattr(self._origfh, attr, value)
def __delattr__(self, attr):
return delattr(self._origfh, attr)
def __enter__(self):
return self._origfh.__enter__()
def __exit__(self, exc_type, exc_value, exc_tb):
raise NotImplementedError('attempted instantiating ' + str(type(self)))
def close(self):
raise NotImplementedError('attempted instantiating ' + str(type(self)))
class delayclosedfile(closewrapbase):
"""Proxy for a file object whose close is delayed.
Do not instantiate outside of the vfs layer.
"""
def __init__(self, fh, closer):
super(delayclosedfile, self).__init__(fh)
object.__setattr__(self, r'_closer', closer)
def __exit__(self, exc_type, exc_value, exc_tb):
self._closer.close(self._origfh)
def close(self):
self._closer.close(self._origfh)
class backgroundfilecloser(object):
"""Coordinates background closing of file handles on multiple threads."""
def __init__(self, ui, expectedcount=-1):
self._running = False
self._entered = False
self._threads = []
self._threadexception = None
# Only Windows/NTFS has slow file closing. So only enable by default
# on that platform. But allow to be enabled elsewhere for testing.
defaultenabled = pycompat.osname == 'nt'
enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
if not enabled:
return
# There is overhead to starting and stopping the background threads.
# Don't do background processing unless the file count is large enough
# to justify it.
minfilecount = ui.configint('worker', 'backgroundcloseminfilecount')
# FUTURE dynamically start background threads after minfilecount closes.
# (We don't currently have any callers that don't know their file count)
if expectedcount > 0 and expectedcount < minfilecount:
return
maxqueue = ui.configint('worker', 'backgroundclosemaxqueue')
threadcount = ui.configint('worker', 'backgroundclosethreadcount')
ui.debug('starting %d threads for background file closing\n' %
threadcount)
self._queue = util.queue(maxsize=maxqueue)
self._running = True
for i in range(threadcount):
t = threading.Thread(target=self._worker, name='backgroundcloser')
self._threads.append(t)
t.start()
def __enter__(self):
self._entered = True
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self._running = False
# Wait for threads to finish closing so open files don't linger for
# longer than lifetime of context manager.
for t in self._threads:
t.join()
def _worker(self):
"""Main routine for worker thread."""
while True:
try:
fh = self._queue.get(block=True, timeout=0.100)
# Need to catch or the thread will terminate and
# we could orphan file descriptors.
try:
fh.close()
except Exception as e:
# Stash so can re-raise from main thread later.
self._threadexception = e
except util.empty:
if not self._running:
break
def close(self, fh):
"""Schedule a file for closing."""
if not self._entered:
raise error.Abort(_('can only call close() when context manager '
'active'))
# If a background thread encountered an exception, raise now so we fail
# fast. Otherwise we may potentially go on for minutes until the error
# is acted on.
if self._threadexception:
e = self._threadexception
self._threadexception = None
raise e
# If we're not actively running, close synchronously.
if not self._running:
fh.close()
return
self._queue.put(fh, block=True, timeout=None)
class checkambigatclosing(closewrapbase):
"""Proxy for a file object, to avoid ambiguity of file stat
See also util.filestat for detail about "ambiguity of file stat".
This proxy is useful only if the target file is guarded by any
lock (e.g. repo.lock or repo.wlock)
Do not instantiate outside of the vfs layer.
"""
def __init__(self, fh):
super(checkambigatclosing, self).__init__(fh)
object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))
def _checkambig(self):
oldstat = self._oldstat
if oldstat.stat:
_avoidambig(self._origfh.name, oldstat)
def __exit__(self, exc_type, exc_value, exc_tb):
self._origfh.__exit__(exc_type, exc_value, exc_tb)
self._checkambig()
def close(self):
self._origfh.close()
self._checkambig()
|
py | b4108c0efa02a35bbd76fae96ebf31c276251bdf | """
Multiplicative Abelian Groups With Values
Often, one ends up with a set that forms an Abelian group. It would be
nice if one could return an Abelian group class to encapsulate the
data. However,
:func:`~sage.groups.abelian_gps.abelian_group.AbelianGroup` is an
abstract Abelian group defined by generators and relations. This
module implements :class:`AbelianGroupWithValues` that allows the
group elements to be decorated with values.
An example where this module is used is the unit group of a number
field, see :mod:`sage.rings.number_field.unit_group`. The units form a
finitely generated Abelian group. We can think of the elements either
as abstract Abelian group elements or as particular numbers in the
number field. The :func:`AbelianGroupWithValues` keeps track of these
associated values.
.. warning::
Really, this requires a group homomorphism from the abstract
Abelian group to the set of values. This is only checked if you
pass the ``check=True`` option to :func:`AbelianGroupWithValues`.
EXAMPLES:
Here is `\ZZ_6` with value `-1` assigned to the generator::
sage: Z6 = AbelianGroupWithValues([-1], [6], names='g')
sage: g = Z6.gen(0)
sage: g.value()
-1
sage: g*g
g^2
sage: (g*g).value()
1
sage: for i in range(7):
... print i, g^i, (g^i).value()
0 1 1
1 g -1
2 g^2 1
3 g^3 -1
4 g^4 1
5 g^5 -1
6 1 1
The elements come with a coercion embedding into the
:meth:`~AbelianGroupWithValues_class.values_group`, so you can use the
group elements instead of the values::
sage: CF3.<zeta> = CyclotomicField(3)
sage: Z3.<g> = AbelianGroupWithValues([zeta], [3])
sage: Z3.values_group()
Cyclotomic Field of order 3 and degree 2
sage: g.value()
zeta
sage: CF3(g)
zeta
sage: g + zeta
2*zeta
sage: zeta + g
2*zeta
"""
##########################################################################
# Copyright (C) 2012 Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL):
#
# http://www.gnu.org/licenses/
##########################################################################
from sage.misc.misc import prod
from sage.rings.integer import Integer
from sage.categories.morphism import Morphism
from sage.groups.abelian_gps.abelian_group import AbelianGroup_class, _normalize
from sage.groups.abelian_gps.abelian_group_element import AbelianGroupElement
def AbelianGroupWithValues(values, n, gens_orders=None, names='f', check=False, values_group=None):
"""
Construct an Abelian group with values associated to the generators.
INPUT:
- ``values`` -- a list/tuple/iterable of values that you want to
associate to the generators.
- ``n`` -- integer (optional). If not specified, will be derived
from ``gens_orders``.
- ``gens_orders`` -- a list of non-negative integers in the form
`[a_0, a_1, \dots, a_{n-1}]`, typically written in increasing
order. This list is padded with zeros if it has length less
than n. The orders of the commuting generators, with `0`
denoting an infinite cyclic factor.
- ``names`` -- (optional) names of generators
- ``values_group`` -- a parent or ``None`` (default). The common
parent of the values. This might be a group, but can also just
contain the values. For example, if the values are units in a
ring then the ``values_group`` would be the whole ring. If
``None`` it will be derived from the values.
EXAMPLES::
sage: G = AbelianGroupWithValues([-1], [6])
sage: g = G.gen(0)
sage: for i in range(7):
... print i, g^i, (g^i).value()
0 1 1
1 f -1
2 f^2 1
3 f^3 -1
4 f^4 1
5 f^5 -1
6 1 1
sage: G.values_group()
Integer Ring
The group elements come with a coercion embedding into the
:meth:`values_group`, so you can use them like their
:meth:`~sage.groups.abelian_gps.value.AbelianGroupWithValuesElement.value`
::
sage: G.values_embedding()
Generic morphism:
From: Multiplicative Abelian group isomorphic to C6
To: Integer Ring
sage: g.value()
-1
sage: 0 + g
-1
sage: 1 + 2*g
-1
"""
if check:
raise NotImplementedError('checking that the values are a homomorphism is not implemented')
gens_orders, names = _normalize(n, gens_orders, names)
if values_group is None:
from sage.structure.sequence import Sequence
values_group = Sequence(values).universe()
values = tuple( values_group(val) for val in values )
M = AbelianGroupWithValues_class(gens_orders, names, values, values_group)
return M
class AbelianGroupWithValuesEmbedding(Morphism):
"""
The morphism embedding the Abelian group with values in its values group.
INPUT:
- ``domain`` -- a :class:`AbelianGroupWithValues_class`
    - ``codomain`` -- the values group (need not be in the category of
groups, e.g. symbolic ring).
EXAMPLES::
sage: Z4.<g> = AbelianGroupWithValues([I], [4])
sage: embedding = Z4.values_embedding(); embedding
Generic morphism:
From: Multiplicative Abelian group isomorphic to C4
To: Symbolic Ring
sage: embedding(1)
1
sage: embedding(g)
I
sage: embedding(g^2)
-1
"""
def __init__(self, domain, codomain):
"""
Construct the morphism
TESTS::
sage: Z4 = AbelianGroupWithValues([I], [4])
sage: from sage.groups.abelian_gps.values import AbelianGroupWithValuesEmbedding
sage: AbelianGroupWithValuesEmbedding(Z4, Z4.values_group())
Generic morphism:
From: Multiplicative Abelian group isomorphic to C4
To: Symbolic Ring
"""
assert domain.values_group() is codomain
from sage.categories.homset import Hom
Morphism.__init__(self, Hom(domain, codomain))
def _call_(self, x):
"""
Return the value associated to ``x``
INPUT:
- ``x`` -- a group element
OUTPUT:
Its value.
EXAMPLES::
sage: Z4.<g> = AbelianGroupWithValues([I], [4])
sage: embedding = Z4.values_embedding()
sage: embedding(g)
I
sage: embedding._call_(g)
I
"""
return x.value()
class AbelianGroupWithValuesElement(AbelianGroupElement):
"""
An element of an Abelian group with values assigned to generators.
INPUT:
- ``exponents`` -- tuple of integers. The exponent vector defining
the group element.
- ``parent`` -- the parent.
- ``value`` -- the value assigned to the group element or ``None``
(default). In the latter case, the value is computed as needed.
EXAMPLES::
sage: F = AbelianGroupWithValues([1,-1], [2,4])
sage: a,b = F.gens()
sage: TestSuite(a*b).run()
"""
def __init__(self, exponents, parent, value=None):
"""
Create an element
EXAMPLES::
sage: F = AbelianGroupWithValues([1,-1], [2,4])
sage: a,b = F.gens()
sage: a*b^-1 in F
True
sage: (a*b^-1).value()
-1
"""
self._value = value
AbelianGroupElement.__init__(self, exponents, parent)
def value(self):
"""
Return the value of the group element.
OUTPUT:
The value according to the values for generators, see
:meth:`~AbelianGroupWithValues.gens_values`.
EXAMPLES::
sage: G = AbelianGroupWithValues([5], 1)
sage: G.0.value()
5
"""
if self._value is None:
values = self.parent().gens_values()
self._value = prod( v**e for v,e in zip(values, self.exponents()) )
return self._value
def _div_(left, right):
"""
Divide ``left`` by ``right``
TESTS::
sage: G.<a,b> = AbelianGroupWithValues([5,2], 2)
sage: a._div_(b)
a*b^-1
sage: a/b
a*b^-1
sage: (a/b).value()
5/2
"""
m = AbelianGroupElement._div_(left, right)
m._value = left.value() / right.value()
return m
def _mul_(left, right):
"""
Multiply ``left`` and ``right``
TESTS::
sage: G.<a,b> = AbelianGroupWithValues([5,2], 2)
sage: a._mul_(b)
a*b
sage: a*b
a*b
sage: (a*b).value()
10
"""
m = AbelianGroupElement._mul_(left, right)
m._value = left.value() * right.value()
return m
def __pow__(self, n):
"""
Exponentiate ``self``
INPUT:
- ``n`` -- integer. The exponent.
TESTS::
sage: G.<a,b> = AbelianGroupWithValues([5,2], 2)
sage: a^3
a^3
sage: (a^3).value()
125
"""
m = Integer(n)
if n != m:
raise TypeError('argument n (= '+str(n)+') must be an integer.')
pow_self = AbelianGroupElement.__pow__(self, m)
pow_self._value = pow(self.value(), m)
return pow_self
def inverse(self):
"""
Return the inverse element.
EXAMPLE::
sage: G.<a,b> = AbelianGroupWithValues([2,-1], [0,4])
sage: a.inverse()
a^-1
sage: a.inverse().value()
1/2
sage: a.__invert__().value()
1/2
sage: (~a).value()
1/2
sage: (a*b).value()
-2
sage: (a*b).inverse().value()
-1/2
"""
m = AbelianGroupElement.inverse(self)
m._value = ~self.value()
return m
__invert__ = inverse
class AbelianGroupWithValues_class(AbelianGroup_class):
"""
The class of an Abelian group with values associated to the generator.
INPUT:
- ``generator_orders`` -- tuple of integers. The orders of the
generators.
- ``names`` -- string or list of strings. The names for the generators.
- ``values`` -- Tuple the same length as the number of
generators. The values assigned to the generators.
- ``values_group`` -- the common parent of the values.
EXAMPLES::
sage: G.<a,b> = AbelianGroupWithValues([2,-1], [0,4])
sage: TestSuite(G).run()
"""
Element = AbelianGroupWithValuesElement
def __init__(self, generator_orders, names, values, values_group):
"""
The Python constructor
TESTS::
sage: G = AbelianGroupWithValues([2,-1], [0,4]); G
Multiplicative Abelian group isomorphic to Z x C4
sage: cm = sage.structure.element.get_coercion_model()
sage: cm.explain(G, ZZ, operator.add)
Coercion on left operand via
Generic morphism:
From: Multiplicative Abelian group isomorphic to Z x C4
To: Integer Ring
Arithmetic performed after coercions.
Result lives in Integer Ring
Integer Ring
"""
self._values = values
self._values_group = values_group
AbelianGroup_class.__init__(self, generator_orders, names)
self._populate_coercion_lists_(embedding=self.values_embedding())
if self.ngens() != len(self._values):
raise ValueError('need one value per generator')
def gen(self, i=0):
"""
The `i`-th generator of the abelian group.
INPUT:
- ``i`` -- integer (default: 0). The index of the generator.
OUTPUT:
A group element.
EXAMPLES::
sage: F = AbelianGroupWithValues([1,2,3,4,5], 5,[],names='a')
sage: F.0
a0
sage: F.0.value()
1
sage: F.2
a2
sage: F.2.value()
3
sage: G = AbelianGroupWithValues([-1,0,1], [2,1,3])
sage: G.gens()
(f0, 1, f2)
"""
g = AbelianGroup_class.gen(self, i)
g._value = self._values[i]
return g
def gens_values(self):
"""
Return the values associated to the generators.
OUTPUT:
A tuple.
EXAMPLES::
sage: G = AbelianGroupWithValues([-1,0,1], [2,1,3])
sage: G.gens()
(f0, 1, f2)
sage: G.gens_values()
(-1, 0, 1)
"""
return self._values
def values_group(self):
"""
The common parent of the values.
The values need to form a multiplicative group, but can be
embedded in a larger structure. For example, if the values are
units in a ring then the :meth:`values_group` would be the
whole ring.
OUTPUT:
The common parent of the values, containing the group
generated by all values.
EXAMPLES::
sage: G = AbelianGroupWithValues([-1,0,1], [2,1,3])
sage: G.values_group()
Integer Ring
sage: Z4 = AbelianGroupWithValues([I], [4])
sage: Z4.values_group()
Symbolic Ring
"""
return self._values_group
def values_embedding(self):
"""
Return the embedding of ``self`` in :meth:`values_group`.
OUTPUT:
A morphism.
EXAMPLES::
sage: Z4 = AbelianGroupWithValues([I], [4])
sage: Z4.values_embedding()
Generic morphism:
From: Multiplicative Abelian group isomorphic to C4
To: Symbolic Ring
"""
return AbelianGroupWithValuesEmbedding(self, self.values_group())
|
py | b4108c7ea586420bcc454630980885d7e9bccd0a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import os
from itertools import product
import numpy as np
from pyqclib.parsing import QCResult
from pyqclib.defs import UNITLESS
class TabularExtractor(object):
"""
Interface for creating extractor routines
where two variables are varied (row_vars and col_vars)
and the data is conveniently put in 2D array structure
"""
out_fmt = '%.18e'
out_delimiter = ' '
def __init__(self, row_vars, col_vars, output, unit):
"""
        Arguments:
        - `row_vars`: values enumerated along the rows of the output table.
        - `col_vars`: values enumerated along the columns of the output table.
        - `output`: filename or file object passed to numpy.savetxt.
        - `unit`: unit that dimensioned property values are rescaled to.
"""
self._row_vars = row_vars
self._col_vars = col_vars
self._output = output
self._unit = unit
self._data = np.zeros((len(row_vars), len(col_vars)))
def _populate(self):
for (ri, r), (ci, c) in product(enumerate(self._row_vars), enumerate(self._col_vars)):
prop_getter = self.mk_prop_getter(r, c)
prop_getter_args = self.mk_prop_getter_args(r, c)
val = prop_getter(*prop_getter_args)
if prop_getter.returns_with_unit:
self._data[ri, ci] = float(val.rescale(self._unit))
else:
self._data[ri, ci] = val
def extract(self):
self._populate()
np.savetxt(self._output, self._data, fmt = self.out_fmt, delimiter = self.out_delimiter)
def mk_prop_getter(self, row_var, col_var):
pass
def mk_prop_getter_args(self, row_var, col_var):
pass
class QCTabularExtractor(TabularExtractor):
_qcres = {}
prop = 'SET_THIS_IN_SUBCLASS'
def qcres(self, row_var, col_var):
""" Caches QCResult instances """
path = self._path_resolver(row_var, col_var)
if not path in self._qcres:
self._qcres[path] = QCResult(path)
return self._qcres[path]
def _path_resolver(self, row_var, col_var):
""" To be subclassed """
pass
def mk_prop_getter(self, row_var, col_var):
return getattr(self.qcres(row_var, col_var), self.prop)
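# Hedged usage sketch (subclass, property name and path layout are illustrative):
# a concrete extractor overrides ``prop``, ``_path_resolver`` and the getter args,
# then calls ``extract()`` to write the row_vars x col_vars table:
#   class EnergyExtractor(QCTabularExtractor):
#       prop = 'scf_energy'
#       def _path_resolver(self, row_var, col_var):
#           return os.path.join(row_var, col_var, 'calc.out')
#       def mk_prop_getter_args(self, row_var, col_var):
#           return ()
#   EnergyExtractor(basis_sets, molecules, 'energies.txt', 'hartree').extract()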
|
py | b4108e479305095310b6467404d1be13a915ab90 | # Copyright 2010 ITASoftware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import types
import time
import logging
import random
from math import floor
import rrdtool
import coil
from django.conf import settings
from django.http import HttpResponse
from railroad.errors import RailroadError
DAY = 60 * 60 * 24 # Seconds in a day
logger = logging.getLogger(__name__)
def sigfigs(float):
"""Round float using desired sigfigs"""
desired_sigfigs = 3
powers = range(desired_sigfigs)
power = 0
    while power < len(powers):
if float / pow(10, power) < 1:
break
power += 1
if power == 3:
return int(float)
else:
return round(float, desired_sigfigs - power)
def labelize(data, index, base, unit):
"""Return a label containing statistics"""
statistics = data[index]['statistics']
cur = str(sigfigs(statistics['cur'] / base)) \
if statistics['cur'] != None else 'Null'
return (' (cur: %s%s, min: %s%s, max: %s%s, avg: %s%s)' %
(cur, unit, str(sigfigs(statistics['min'] / base)), unit,
str(sigfigs(statistics['max'] / base)), unit,
str(sigfigs(statistics['avg'] / base)), unit))
def getColors(names):
rng_state = random.getstate()
n = len(names)
colors = []
offset = 0
for i in range(n):
random.seed(names[i])
# Make a random color
h = 360 * ((float(i) / n + offset) % 1)
s = 0.6 + 0.4 * random.random()
l = 0.375 + 0.25 * random.random()
# convert it from HSL to RGB
c = (1 - abs(2 * l - 1)) * s
hp = floor(h / 60)
x = c * (1 - abs(hp % 2 - 1))
(rp, gp, bp) = {0: (c, x, 0), 1: (x, c, 0), 2: (0, c, x),
3: (0, x, c), 4: (x, 0, c), 5: (c, 0, x)}[hp]
m = l - 0.5 * c
r, g, b = [x * 256 for x in (rp + m, gp + m, bp + m)]
# Convert it to a hex color
colors.append('#%02x%02x%02x' % (r, g, b))
random.setstate(rng_state)
return colors
def get_data(host, service, start=None, end=None, resolution='150'):
if not end:
end = int(time.time())
if not start:
start = end - DAY
rra_path = settings.RRA_PATH
rrd = '%s%s/%s.rrd' % (rra_path, host, service)
coilfile = '%s%s/%s.coil' % (rra_path, host, service)
railroad_conf = 'railroad_conf'
statistics = 'statistics'
trend_attributes = ['color', 'stack', 'scale', 'display']
# calculate custom resolution
resolution = (int(end) - int(start)) / int(resolution)
# Flush rrdcached as a separate command rather as part of fetch().
# A failure to flush isn't fatal, just results in stale data.
daemon = getattr(settings, 'RRDCACHED_ADDRESS', None)
if daemon:
try:
rrdtool.flushcached('--daemon', str(daemon), str(rrd))
except rrdtool.error, ex:
logger.warning('rrdcached flush failed: %s', ex)
# rrdtool hates unicode strings, and Django gives us one,
# so convert to ascii
rrdslice = rrdtool.fetch(str(rrd),
'--start', str(start),
'--end', str(end),
'--resolution', str(resolution),
'AVERAGE')
time_struct = time.gmtime()
time_dict = {'h': time_struct.tm_hour, 'm': time_struct.tm_min, \
's': time_struct.tm_sec}
current_time = '%(h)02d:%(m)02d:%(s)02d UTC' % time_dict
# Parse the data
actual_start, actual_end, res = rrdslice[0]
# Multiply by 1000 to convert to JS timestamp (millisecond resolution)
res *= 1000
coilstring = open(coilfile).read()
coilstruct = coil.parse(coilstring)
query = coilstruct.get('query', {})
if not(query):
raise RailroadError("OMG PONIES! query doesn't exist in coil file")
# Graph options for FLOT
graph_options = {
'xaxis': {
'mode': 'time',
},
'yaxis': {},
'legend': {'position': 'nw'},
'selection': {'mode': 'x'},
'pan': {'interactive': True},
'grid': {
'hoverable': True,
}
}
# Handle unconventional trend definitions
root_trend = coilstruct.get('trend', {})
all_labels = rrdslice[1]
labels = []
root_label = None
if root_trend:
root_label = root_trend.get('label', None)
if not(root_label):
root_label = coilstruct.get('label', None)
compound = query.get('type') == 'compound'
if compound:
for key in query.keys():
val = query.get(key)
if isinstance(val, coil.struct.Struct):
trend = val.get('trend', None)
if trend and trend.get('type', None):
label = trend.get('label', None)
if not(label):
label = key
labels.append((key, label))
if 'query' in all_labels:
trend = query.get('trend', None)
if trend:
query_label = trend.get('label', None)
if not(query_label):
query_label = root_label
labels.append(('query', query_label if query_label else 'Result'))
if '_result' in all_labels:
labels.append(('_result', root_label if root_label else 'Result'))
length = len(labels)
indices = range(length)
# flot_data and flot_data are of the format
# [ { label: "Foo", data: [ [10, 1], [17, -14], [30, 5] ] },
# { label: "Bar", data: [ [11, 13], [19, 11], [30, -7] ] } ]
# See Flot Reference (http://flot.googlecode.com/svn/trunk/API.txt)
flot_data = [{'label': label[1], railroad_conf: {}, 'data': []}
for label in labels]
labels = map(lambda x: x[0], labels)
state_data = []
graph_options['colors'] = getColors(labels)
# Reading graph options
for index in indices:
key = labels[index]
trend = query.get(key, {}).get('trend', {})
if not(trend):
continue
trend_settings = {}
for var in trend_attributes:
trend_settings[var] = trend.get(var, '')
flot_data[index]['lines'] = {'show': True}
if trend_settings['display']:
flot_data[index]['lines']['fill'] = (
0.5 if trend_settings['display'] == 'area' else 0)
if trend_settings['scale']:
flot_data[index][railroad_conf]['scale'] = trend_settings['scale']
else:
flot_data[index][railroad_conf]['scale'] = 1
if trend_settings['color']:
flot_data[index]['color'] = trend_settings['color']
if trend_settings['stack']:
flot_data[index]['stack'] = True
if index > 0:
flot_data[index-1]['stack'] = True
# See above
x = actual_start * 1000
transform = [all_labels.index(z) for z in labels]
state_index = all_labels.index('_state')
# Set defaults
datapoint = rrdslice[2][0]
for index in indices:
flot_data[index][statistics] = {}
flot_data[index][statistics]['cur'] = None
flot_data[index][statistics]['num'] = 0
flot_data[index][statistics]['sum'] = 0
flot_data[index][statistics]['max'] = None
flot_data[index][statistics]['min'] = None
if not 'scale' in flot_data[index][railroad_conf]:
flot_data[index][railroad_conf]['scale'] = 1
data = datapoint[transform[index]]
if data:
data *= flot_data[index][railroad_conf]['scale']
flot_data[index][statistics]['max'] = data
flot_data[index][statistics]['min'] = data
# Loop over all data and aggregate it in flot's desired format
for datapoints in rrdslice[2]:
data = datapoints[state_index]
state_data.append([x, data])
for index in indices:
data = datapoints[transform[index]]
if datapoints[state_index] != None:
flot_data[index][statistics]['cur'] = data
if data != None:
flot_data[index][statistics]['num'] += 1
data *= flot_data[index][railroad_conf]['scale']
flot_data[index][statistics]['sum'] += data
if (flot_data[index][statistics]['max'] == None or
data > flot_data[index][statistics]['max']):
flot_data[index][statistics]['max'] = data
if (flot_data[index][statistics]['min'] == None or
data < flot_data[index][statistics]['min']):
flot_data[index][statistics]['min'] = data
flot_data[index]['data'].append([x, data])
if 'lines' not in flot_data[index]:
flot_data[index]['lines'] = {}
flot_data[index]['lines']['show'] = True
x += res
empty_graph = True
for index in indices:
if flot_data[index][statistics]['num']:
empty_graph = False
base = 1000
max = 100
if length > 0:
value = query.get(labels[0], {}).get('trend', {}).get('base', '')
if value:
base = int(value)
max = flot_data[0][statistics]['max']
for index in indices:
if flot_data[index][statistics]['num'] > 0:
flot_data[index][statistics]['avg'] = (
flot_data[index][statistics]['sum'] /
flot_data[index][statistics]['num'])
if flot_data[index][statistics]['max'] > max:
max = flot_data[index][statistics]['max']
# Compute appropriate unit from base
bases = ['', 'K', 'M', 'G', 'T']
for interval in range(len(bases)):
if max != None and (max / pow(base, interval)) <= base:
break
# XXX: What are these supposed to do?
final_base = pow(base, interval)
unit = bases[interval]
if max != None:
graph_options['yaxis']['max'] = max * 1.1 + 1
if root_trend and max != None:
axis_max = root_trend.get('axis_max', '')
if axis_max and graph_options['yaxis']['max'] < axis_max:
graph_options['yaxis']['max'] = axis_max * 1.1 + 1
if root_trend:
axis_label = root_trend.get('axis_label', '')
if axis_label:
graph_options['yaxis']['label'] = axis_label
for index in indices:
del(flot_data[index][railroad_conf])
# Set background of graph based on state
colors = ['#BBFFBB', '#FFFFBB', '#FFBBBB', '#C0C0C0']
markings = []
state = state_data[0][1]
if type(state) == types.FloatType:
state = int(state) if float.is_integer(state) else 3
markings.append({'xaxis': {'from': state_data[0][0]}, \
'color': colors[state]})
for x, y in state_data:
if type(y) == types.FloatType:
y = int(y) if float.is_integer(y) else 3
if y != state:
if type(state) == types.IntType:
markings[-1]['xaxis']['to'] = x
state = y
if type(state) == types.IntType:
markings.append({'xaxis': {'from': x}, 'color': colors[state]})
if type(state) == types.FloatType:
markings[-1]['xaxis']['to'] = state_data[-1][0]
empty_graph = empty_graph and (not(len(markings)))
graph_options['grid']['markings'] = markings
# Pass state, BUT DONT DRAW!! this is so that graphs with ONLY state
# still draw (otherwise they don't get axes, ticks, etc)
flot_data.append({'data': state_data, 'lines': {'show': False}})
result = {'options': graph_options, 'data': flot_data, 'base': base,
'empty': empty_graph, 'current_time': current_time,
'start': start, 'end': end,
}
return result
def index(request, host, service, start, end, resolution='150'):
"""Reads the rrd and returns the data in flot-friendly format"""
result = get_data(host, service, start, end, resolution)
return HttpResponse(json.dumps(result))
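# Hedged wiring sketch (pattern and view import are illustrative, not from this
# repo): the view expects host, service, start, end (and optional resolution)
# captured from the URL, e.g. in a urls.py:
#   url(r'^parserrd/(?P<host>[^/]+)/(?P<service>[^/]+)/(?P<start>\d+)/(?P<end>\d+)$',
#       index)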
|
py | b4108fc06cb9c8c0f4d1d66769b27221a3812cba | from setuptools import setup, find_packages
setup(name='peitho',
version='0.1.3',
description='Perfecting Experiments with Information Theory',
url='https://github.com/MichaelPHStumpf/Peitho',
download_url='https://github.com/MichaelPHStumpf/Peitho/archive/0.1.3.tar.gz',
author='Leander Dony, Scott Ward, Jonas Mackerodt, Juliane Liepe, Michael PH Stumpf',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires = [
'pycuda',
'numpy',
'matplotlib',
'python-libsbml'
],
entry_points = {
'console_scripts': ['peitho=peitho.main.main:main']
},
keywords = ['information theory','entropy','experimental design'],
classifiers = ['Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'],
zip_safe=False)
|
py | b410911db36b5a1670684da3ffe8437a89f7b4ee | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProtectionContainer(Model):
"""Base class for container with backup items. Containers with specific
workloads are derived from this class.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureBackupServerContainer, AzureSqlContainer,
AzureStorageContainer, AzureWorkloadContainer, DpmContainer,
GenericContainer, IaaSVMContainer, MabContainer
All required parameters must be populated in order to send to Azure.
:param friendly_name: Friendly name of the container.
:type friendly_name: str
    :param backup_management_type: Type of backup management for the
container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
'DPM', 'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
'DefaultBackup'
:type backup_management_type: str or
~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
:param registration_status: Status of registration of the container with
the Recovery Services Vault.
:type registration_status: str
:param health_status: Status of health of the container.
:type health_status: str
:param container_type: Required. Constant filled by server.
:type container_type: str
"""
_validation = {
'container_type': {'required': True},
}
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'registration_status': {'key': 'registrationStatus', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
'container_type': {'key': 'containerType', 'type': 'str'},
}
_subtype_map = {
'container_type': {'AzureBackupServerContainer': 'AzureBackupServerContainer', 'AzureSqlContainer': 'AzureSqlContainer', 'StorageContainer': 'AzureStorageContainer', 'AzureWorkloadContainer': 'AzureWorkloadContainer', 'DPMContainer': 'DpmContainer', 'GenericContainer': 'GenericContainer', 'IaaSVMContainer': 'IaaSVMContainer', 'Windows': 'MabContainer'}
}
def __init__(self, **kwargs):
super(ProtectionContainer, self).__init__(**kwargs)
self.friendly_name = kwargs.get('friendly_name', None)
self.backup_management_type = kwargs.get('backup_management_type', None)
self.registration_status = kwargs.get('registration_status', None)
self.health_status = kwargs.get('health_status', None)
self.container_type = None
|
py | b410917da4aebdfb934ebf36c3af5d189255da85 | from django.urls import include, path
urlpatterns = [
path('', include('homepage.urls')),
]
|
py | b41091a672fc6c5cd6195f553e5d343c254d8931 | from .modules import *
from .options import *
from .sdk import NftlabsSdk, ThirdwebSdk
|
py | b41091b732e6c919adf2ffec6818500adc05ca6f | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-28 16:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0029_document_chismes_de'),
]
operations = [
migrations.AddField(
model_name='document',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
py | b410938b59f9b6fb3595914286c26ebff0fdb582 | """
Handle Q smearing
"""
#####################################################################
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#See the license text in license.txt
#copyright 2008, University of Tennessee
######################################################################
import math
import logging
import sys
import numpy as np # type: ignore
from numpy import pi, exp # type:ignore
from sasmodels.resolution import Slit1D, Pinhole1D
from sasmodels.sesans import SesansTransform
from sasmodels.resolution2d import Pinhole2D
from .nxsunit import Converter
def smear_selection(data, model = None):
"""
Creates the right type of smearer according
to the data.
The canSAS format has a rule that either
slit smearing data OR resolution smearing data
is available.
For the present purpose, we choose the one that
    has non-zero data. If both slit and resolution
smearing arrays are filled with good data
(which should not happen), then we choose the
resolution smearing data.
:param data: Data1D object
:param model: sas.model instance
"""
# Sanity check. If we are not dealing with a SAS Data1D
# object, just return None
# This checks for 2D data (does not throw exception because fail is common)
if data.__class__.__name__ not in ['Data1D', 'Theory1D']:
if data is None:
return None
elif data.dqx_data is None or data.dqy_data is None:
return None
return PySmear2D(data)
# This checks for 1D data with smearing info in the data itself (again, fail is likely; no exceptions)
if not hasattr(data, "dx") and not hasattr(data, "dxl")\
and not hasattr(data, "dxw"):
return None
# Look for resolution smearing data
# This is the code that checks for SESANS data; it looks for the file loader
# TODO: change other sanity checks to check for file loader instead of data structure?
_found_sesans = False
#if data.dx is not None and data.meta_data['loader']=='SESANS':
if data.dx is not None and data.isSesans:
#if data.dx[0] > 0.0:
if np.size(data.dx[data.dx <= 0]) == 0:
_found_sesans = True
# if data.dx[0] <= 0.0:
if np.size(data.dx[data.dx <= 0]) > 0:
raise ValueError('one or more of your dx values are negative, please check the data file!')
if _found_sesans == True:
#Pre-compute the Hankel matrix (H)
qmax, qunits = data.sample.zacceptance
SElength = Converter(data._xunit)(data.x, "A")
zaccept = Converter(qunits)(qmax, "1/A"),
Rmax = 10000000
hankel = SesansTransform(data.x, SElength, zaccept, Rmax)
# Then return the actual transform, as if it were a smearing function
return PySmear(hankel, model, offset=0)
_found_resolution = False
if data.dx is not None and len(data.dx) == len(data.x):
# Check that we have non-zero data
if data.dx[0] > 0.0:
_found_resolution = True
#print "_found_resolution",_found_resolution
#print "data1D.dx[0]",data1D.dx[0],data1D.dxl[0]
# If we found resolution smearing data, return a QSmearer
if _found_resolution == True:
return pinhole_smear(data, model)
# Look for slit smearing data
_found_slit = False
if data.dxl is not None and len(data.dxl) == len(data.x) \
and data.dxw is not None and len(data.dxw) == len(data.x):
# Check that we have non-zero data
if data.dxl[0] > 0.0 or data.dxw[0] > 0.0:
_found_slit = True
# Sanity check: all data should be the same as a function of Q
for item in data.dxl:
if data.dxl[0] != item:
_found_resolution = False
break
for item in data.dxw:
if data.dxw[0] != item:
_found_resolution = False
break
# If we found slit smearing data, return a slit smearer
if _found_slit == True:
return slit_smear(data, model)
return None
class PySmear(object):
"""
Wrapper for pure python sasmodels resolution functions.
"""
def __init__(self, resolution, model, offset=None):
self.model = model
self.resolution = resolution
if offset is None:
offset = np.searchsorted(self.resolution.q_calc, self.resolution.q[0])
self.offset = offset
def apply(self, iq_in, first_bin=0, last_bin=None):
"""
Apply the resolution function to the data.
Note that this is called with iq_in matching data.x, but with
iq_in[first_bin:last_bin] set to theory values for these bins,
and the remainder left undefined. The first_bin, last_bin values
should be those returned from get_bin_range.
The returned value is of the same length as iq_in, with the range
first_bin:last_bin set to the resolution smeared values.
"""
if last_bin is None: last_bin = len(iq_in)
start, end = first_bin + self.offset, last_bin + self.offset
q_calc = self.resolution.q_calc
iq_calc = np.empty_like(q_calc)
if start > 0:
iq_calc[:start] = self.model.evalDistribution(q_calc[:start])
if end+1 < len(q_calc):
iq_calc[end+1:] = self.model.evalDistribution(q_calc[end+1:])
iq_calc[start:end+1] = iq_in[first_bin:last_bin+1]
smeared = self.resolution.apply(iq_calc)
return smeared
__call__ = apply
def get_bin_range(self, q_min=None, q_max=None):
"""
For a given q_min, q_max, find the corresponding indices in the data.
Returns first, last.
Note that these are indexes into q from the data, not the q_calc
needed by the resolution function. Note also that these are the
indices, not the range limits. That is, the complete range will be
q[first:last+1].
"""
q = self.resolution.q
first = np.searchsorted(q, q_min)
last = np.searchsorted(q, q_max)
return first, min(last,len(q)-1)
def slit_smear(data, model=None):
q = data.x
width = data.dxw if data.dxw is not None else 0
height = data.dxl if data.dxl is not None else 0
# TODO: width and height seem to be reversed
return PySmear(Slit1D(q, height, width), model)
def pinhole_smear(data, model=None):
q = data.x
width = data.dx if data.dx is not None else 0
return PySmear(Pinhole1D(q, width), model)
class PySmear2D(object):
"""
Q smearing class for SAS 2d pinhole data
"""
def __init__(self, data=None, model=None):
self.data = data
self.model = model
self.accuracy = 'Low'
self.limit = 3.0
self.index = None
self.coords = 'polar'
self.smearer = True
def set_accuracy(self, accuracy='Low'):
"""
Set accuracy.
:param accuracy: string
"""
self.accuracy = accuracy
def set_smearer(self, smearer=True):
"""
Set whether or not smearer will be used
:param smearer: smear object
"""
self.smearer = smearer
def set_data(self, data=None):
"""
Set data.
:param data: DataLoader.Data_info type
"""
self.data = data
def set_model(self, model=None):
"""
Set model.
:param model: sas.models instance
"""
self.model = model
def set_index(self, index=None):
"""
Set index.
:param index: 1d arrays
"""
self.index = index
def get_value(self):
"""
Over sampling of r_nbins times phi_nbins, calculate Gaussian weights,
then find smeared intensity
"""
if self.smearer:
res = Pinhole2D(data=self.data, index=self.index,
nsigma=3.0, accuracy=self.accuracy,
coords=self.coords)
val = self.model.evalDistribution(res.q_calc)
return res.apply(val)
else:
index = self.index if self.index is not None else slice(None)
qx_data = self.data.qx_data[index]
qy_data = self.data.qy_data[index]
q_calc = [qx_data, qy_data]
val = self.model.evalDistribution(q_calc)
return val
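# Illustrative usage sketch (added for clarity, not part of the original
# module). Assumes `data2d` is a loaded 2D dataset with qx_data/qy_data and
# `model` exposes evalDistribution(), mirroring how PySmear2D is used above:
#
# smearer2d = PySmear2D(data=data2d, model=model)
# smearer2d.set_accuracy('Low')
# smearer2d.set_index(None)  # or a boolean mask selecting a subset of points
# intensity = smearer2d.get_value()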
|
py | b410945be9dca0f909d94561f1e0b66cc1cc11a5 | """
Contains a class for adding basic shadows to sprites.
"""
import pygame as pg
class Shadow(pg.sprite.Sprite):
"""A simple class for adding shadows to sprites."""
def __init__(self, size, lock_rect, **kwargs):
"""
Arguments are the size (width, height), and the rect that the shadow
will lock to (the rect of the sprite with the shadow).
See process_kwargs for detail on customizing via keyword.
"""
pg.sprite.Sprite.__init__(self)
self.process_kwargs(kwargs)
self.lock_rect = lock_rect
self.image = pg.Surface(size).convert_alpha()
self.image.fill((0,0,0,0))
self.rect = self.image.get_rect()
pg.draw.ellipse(self.image, self.color, self.rect.inflate(-1,-1))
def process_kwargs(self, kwargs):
"""
Custom values for the attribute of lock_rect to lock to,
the color, and the offset from the center can be passed via keyword.
"""
defaults = {"lock_attr" : "midbottom",
"color" : (0, 0, 50, 150),
"offset" : (0, 0)}
for kwarg in kwargs:
if kwarg in defaults:
defaults[kwarg] = kwargs[kwarg]
else:
raise AttributeError("Invalid keyword {}".format(kwarg))
self.__dict__.update(defaults)
def update(self, *args):
"""
Shadow will be centered on the self.lock_attr attribute of the
self.lock_rect (usually midbottom). The self.offset attribute allows a
shadow to blit offset from the chosen center point; most useful for
flying monsters.
"""
self.rect.center = getattr(self.lock_rect, self.lock_attr)
self.rect.move_ip(self.offset)
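# Illustrative usage sketch (added for clarity, not part of the original
# module). The sprite and screen below are hypothetical; any sprite with a
# .rect attribute can own a shadow:
#
# player = SomeSprite()                     # any sprite with a .rect
# shadow = Shadow((40, 12), player.rect, offset=(0, 4))
# sprites = pg.sprite.Group(shadow, player)
# sprites.update()                          # keeps the shadow locked to the rect
# sprites.draw(screen)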
|
py | b41094b8095d1be1e0aa389a3fce16b8a10eafa6 | from abc import ABC, abstractmethod
from enum import Enum
from base64 import b64encode, b64decode
from typing import List
class RoundResult(Enum):
error = 0
fail = 1
win = 2
class Wordle(ABC):
def __init__(self, target: str, rounds: int):
self.target = target
self.rounds = rounds
self.cur_round = 1
def play(self):
while self.cur_round <= self.rounds:
result = self.play_round(self.cur_round)
if result == RoundResult.error:
print("Invalid input")
continue
elif result == RoundResult.win:
return True
self.cur_round += 1
return False
def play_round(self, round) -> RoundResult:
inp = input(f"Round {round} > ")
if not self.check_input(inp):
return RoundResult.error
diff = self.compare(inp, self.target)
if self.is_win(diff):
return RoundResult.win
else:
print(diff)
return RoundResult.fail
@abstractmethod
def check_input(self, s: str) -> bool:
pass
@abstractmethod
def compare(self, s: str, target: str) -> str:
pass
@abstractmethod
def is_win(self, s: str) -> bool:
pass
class B64dle(Wordle):
def __init__(self, words: List[str], target: str, rounds: int):
super().__init__(b64encode(target.encode()).decode(), rounds)
self.words = words
self.exact = "O"
self.contains = "-"
self.wrong = "X"
def check_input(self, s: str) -> bool:
try:
word = b64decode(s.encode()).decode()
if word not in self.words:
return False
return len(s) == len(self.target)
except Exception:
return False
def compare(self, s: str, target: str) -> str:
assert len(s) == len(target)
ret = ""
for i, c in enumerate(s):
if c == target[i]:
ret += self.exact
elif c in target:
ret += self.contains
else:
ret += self.wrong
return ret
def is_win(self, s: str) -> bool:
return all([x == self.exact for x in s])
with open("five_letter_words.txt") as f:
# https://raw.githubusercontent.com/charlesreid1/five-letter-words/b45fda30524a981c73ec709618271cecfb51c361/sgb-words.txt
words = list(map(str.strip, f))
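# Illustrative usage sketch (added for clarity, not part of the original
# script). The target word "crane" and round count are arbitrary examples;
# guesses must be supplied base64-encoded:
#
# game = B64dle(words, "crane", rounds=6)
# won = game.play()
# print("correct!" if won else "out of rounds")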
|
py | b4109632b18d9dd58ee9986c2080438f948815d4 | from abc import ABC, abstractmethod
class Pyrak:
VERSION = "1.0.0"
DEFAULT_PROTOCOL_VERSION = "1"
PRIORITY_NORMAL = 0
PRIORITY_IMMEDIATE = 1
FLAG_NEED_ACK = 0b00001000
PACKET_ENCAPSULATED = 0x01
PACKET_OPEN_SESSION = 0x02
PACKET_CLOSE_SESSION = 0x03
PACKET_INVALID_SESSION = 0x04
PACKET_SEND_QUEUE = 0x05
PACKET_ACK_NOTIFICATION = 0x06
PACKET_SET_OPTION = 0x07
PACKET_RAW = 0x08
PACKET_BLOCK_ADDRESS = 0x09
PACKET_UNBLOCK_ADDRESS = 0x10
PACKET_REPORT_PING = 0x11
PACKET_SHUTDOWN = 0x7e
PACKET_EMERGENCY_SHUTDOWN = 0x7f
SYSTEM_ADDRESS_COUNT = 20 |
py | b4109669847fdebd843f7afaac47be66e0621650 | import logging
import re
import shutil
import subprocess
from collections import OrderedDict
import traceback
from pathlib import Path
import numpy as np
import pandas as pd
import one.alf.io as alfio
from ibllib.misc import check_nvidia_driver
from ibllib.ephys import ephysqc, spikes, sync_probes
from ibllib.io import ffmpeg, spikeglx
from ibllib.io.video import label_from_path
from ibllib.io.extractors import ephys_fpga, ephys_passive, camera
from ibllib.pipes import tasks
from ibllib.pipes.training_preprocessing import TrainingRegisterRaw as EphysRegisterRaw
from ibllib.pipes.misc import create_alyx_probe_insertions
from ibllib.qc.task_extractors import TaskQCExtractor
from ibllib.qc.task_metrics import TaskQC
from ibllib.qc.camera import run_all_qc as run_camera_qc
from ibllib.dsp import rms
from ibllib.io.extractors import signatures
_logger = logging.getLogger("ibllib")
# level 0
class EphysPulses(tasks.Task):
"""
Extract Pulses from raw electrophysiology data into numpy arrays
Perform the probes synchronisation with nidq (3B) or main probe (3A)
"""
cpu = 2
io_charge = 30 # this jobs reads raw ap files
priority = 90 # a lot of jobs depend on this one
level = 0 # this job doesn't depend on anything
def _run(self, overwrite=False):
# outputs numpy
syncs, out_files = ephys_fpga.extract_sync(self.session_path, overwrite=overwrite)
for out_file in out_files:
_logger.info(f"extracted pulses for {out_file}")
status, sync_files = sync_probes.sync(self.session_path)
return out_files + sync_files
class RawEphysQC(tasks.Task):
"""
Computes raw electrophysiology QC
"""
cpu = 2
io_charge = 30 # this jobs reads raw ap files
priority = 10 # a lot of jobs depend on this one
level = 0 # this job doesn't depend on anything
signature = {'input_files': signatures.RAWEPHYSQC, 'output_files': ()}
def _run(self, overwrite=False):
eid = self.one.path2eid(self.session_path)
pids = [x['id'] for x in self.one.alyx.rest('insertions', 'list', session=eid)]
# Usually there should be two probes, if there are less, check if all probes are registered
if len(pids) < 2:
_logger.warning(f"{len(pids)} probes registered for session {eid}, trying to register from local data")
pids = [p['id'] for p in create_alyx_probe_insertions(self.session_path, one=self.one)]
qc_files = []
for pid in pids:
try:
eqc = ephysqc.EphysQC(pid, session_path=self.session_path, one=self.one)
qc_files.extend(eqc.run(update=True, overwrite=overwrite))
except AssertionError:
self.status = -1
continue
return qc_files
class EphysAudio(tasks.Task):
"""
Compresses the microphone wav file in a lossless flac file
"""
cpu = 2
priority = 10 # a lot of jobs depend on this one
level = 0 # this job doesn't depend on anything
signature = {'input_files': ('_iblrig_micData.raw.wav', 'raw_behavior_data', True),
'output_files': ('_iblrig_micData.raw.flac', 'raw_behavior_data', True),
}
def _run(self, overwrite=False):
command = "ffmpeg -i {file_in} -y -nostdin -c:a flac -nostats {file_out}"
file_in = next(self.session_path.rglob("_iblrig_micData.raw.wav"), None)
if file_in is None:
return
file_out = file_in.with_suffix(".flac")
status, output_file = ffmpeg.compress(file_in=file_in, file_out=file_out, command=command)
return [output_file]
class SpikeSorting(tasks.Task):
"""
Pykilosort 2.5 pipeline
"""
gpu = 1
io_charge = 70 # this jobs reads raw ap files
priority = 60
level = 1 # depends on the level 0 jobs (pulse extraction and compression)
SHELL_SCRIPT = Path.home().joinpath(
"Documents/PYTHON/iblscripts/deploy/serverpc/kilosort2/run_pykilosort.sh"
)
SPIKE_SORTER_NAME = 'pykilosort'
PYKILOSORT_REPO = Path.home().joinpath('Documents/PYTHON/SPIKE_SORTING/pykilosort')
signature = {'input_files': signatures.SPIKESORTING, 'output_files': ()}
@staticmethod
def _sample2v(ap_file):
md = spikeglx.read_meta_data(ap_file.with_suffix(".meta"))
s2v = spikeglx._conversion_sample2v_from_meta(md)
return s2v["ap"][0]
@staticmethod
def _fetch_pykilosort_version(repo_path):
init_file = Path(repo_path).joinpath('pykilosort', '__init__.py')
version = SpikeSorting._fetch_ks2_commit_hash(repo_path) # default
try:
with open(init_file) as fid:
lines = fid.readlines()
for line in lines:
if line.startswith("__version__ = "):
version = line.split('=')[-1].strip().replace('"', '').replace("'", '')
except Exception:
pass
return f"pykilosort_{version}"
@staticmethod
def _fetch_ks2_commit_hash(repo_path):
command2run = f"git --git-dir {repo_path}/.git rev-parse --verify HEAD"
process = subprocess.Popen(
command2run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
info, error = process.communicate()
if process.returncode != 0:
_logger.error(
f"Can't fetch pykilsort commit hash, will still attempt to run \n"
f"Error: {error.decode('utf-8')}"
)
return ""
return info.decode("utf-8").strip()
def _run_pykilosort(self, ap_file):
"""
Runs the ks2 matlab spike sorting for one probe dataset
the raw spike sorting output is in session_path/spike_sorters/{self.SPIKE_SORTER_NAME}/probeXX folder
(discontinued support for old spike sortings in the probe folder <1.5.5)
:return: path of the folder containing ks2 spike sorting output
"""
self.version = self._fetch_pykilosort_version(self.PYKILOSORT_REPO)
label = ap_file.parts[-2] # this is usually the probe name
sorter_dir = self.session_path.joinpath("spike_sorters", self.SPIKE_SORTER_NAME, label)
FORCE_RERUN = False
if not FORCE_RERUN:
if sorter_dir.joinpath(f"spike_sorting_{self.SPIKE_SORTER_NAME}.log").exists():
_logger.info(f"Already ran: spike_sorting_{self.SPIKE_SORTER_NAME}.log"
f" found in {sorter_dir}, skipping.")
return sorter_dir
print(sorter_dir.joinpath(f"spike_sorting_{self.SPIKE_SORTER_NAME}.log"))
# get the scratch drive from the shell script
with open(self.SHELL_SCRIPT) as fid:
lines = fid.readlines()
line = [line for line in lines if line.startswith("SCRATCH_DRIVE=")][0]
m = re.search(r"\=(.*?)(\#|\n)", line)[0]
scratch_drive = Path(m[1:-1].strip())
assert scratch_drive.exists()
# clean up and create directory, this also checks write permissions
# temp_dir has the following shape: pykilosort/ZM_3003_2020-07-29_001_probe00
# first makes sure the tmp dir is clean
shutil.rmtree(scratch_drive.joinpath(self.SPIKE_SORTER_NAME), ignore_errors=True)
temp_dir = scratch_drive.joinpath(
self.SPIKE_SORTER_NAME, "_".join(list(self.session_path.parts[-3:]) + [label])
)
if temp_dir.exists(): # hmmm this has to be decided, we may want to restart ?
# But failed sessions may then clog the scratch dir and have users run out of space
shutil.rmtree(temp_dir, ignore_errors=True)
temp_dir.mkdir(parents=True, exist_ok=True)
check_nvidia_driver()
command2run = f"{self.SHELL_SCRIPT} {ap_file} {temp_dir}"
_logger.info(command2run)
process = subprocess.Popen(
command2run,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
executable="/bin/bash",
)
info, error = process.communicate()
info_str = info.decode("utf-8").strip()
_logger.info(info_str)
if process.returncode != 0:
error_str = error.decode("utf-8").strip()
# try and get the kilosort log if any
for log_file in temp_dir.rglob('*_kilosort.log'):
with open(log_file) as fid:
log = fid.read()
_logger.error(log)
break
raise RuntimeError(f"{self.SPIKE_SORTER_NAME} {info_str}, {error_str}")
shutil.copytree(temp_dir.joinpath('output'), sorter_dir, dirs_exist_ok=True)
shutil.rmtree(temp_dir, ignore_errors=True)
return sorter_dir
def _run(self, probes=None):
"""
Multiple steps. For each probe:
- Runs ks2 (skips if it already ran)
- synchronize the spike sorting
- output the probe description files
:param probes: (list of str) if provided, will only run spike sorting for specified probe names
:return: list of files to be registered on database
"""
efiles = spikeglx.glob_ephys_files(self.session_path)
ap_files = [(ef.get("ap"), ef.get("label")) for ef in efiles if "ap" in ef.keys()]
out_files = []
for ap_file, label in ap_files:
if isinstance(probes, list) and label not in probes:
continue
try:
ks2_dir = self._run_pykilosort(ap_file) # runs ks2, skips if it already ran
probe_out_path = self.session_path.joinpath("alf", label, self.SPIKE_SORTER_NAME)
shutil.rmtree(probe_out_path, ignore_errors=True)
probe_out_path.mkdir(parents=True, exist_ok=True)
spikes.ks2_to_alf(
ks2_dir,
bin_path=ap_file.parent,
out_path=probe_out_path,
bin_file=ap_file,
ampfactor=self._sample2v(ap_file),
)
logfile = ks2_dir.joinpath(f"spike_sorting_{self.SPIKE_SORTER_NAME}.log")
if logfile.exists():
shutil.copyfile(logfile, probe_out_path.joinpath(
f"_ibl_log.info_{self.SPIKE_SORTER_NAME}.log"))
out, _ = spikes.sync_spike_sorting(ap_file=ap_file, out_path=probe_out_path)
out_files.extend(out)
# convert ks2_output into tar file and also register
# Make this in case spike sorting is in old raw_ephys_data folders, for new
# sessions it should already exist
tar_dir = self.session_path.joinpath(
'spike_sorters', self.SPIKE_SORTER_NAME, label)
tar_dir.mkdir(parents=True, exist_ok=True)
out = spikes.ks2_to_tar(ks2_dir, tar_dir)
out_files.extend(out)
except BaseException:
_logger.error(traceback.format_exc())
self.status = -1
continue
probe_files = spikes.probes_description(self.session_path, one=self.one)
return out_files + probe_files
class EphysVideoCompress(tasks.Task):
priority = 40
level = 1
def _run(self, **kwargs):
# avi to mp4 compression
command = ('ffmpeg -i {file_in} -y -nostdin -codec:v libx264 -preset slow -crf 17 '
'-loglevel 0 -codec:a copy {file_out}')
output_files = ffmpeg.iblrig_video_compression(self.session_path, command)
if len(output_files) == 0:
_logger.info('No compressed videos found; skipping timestamp extraction')
return
labels = [label_from_path(x) for x in output_files]
# Video timestamps extraction
data, files = camera.extract_all(self.session_path, save=True, labels=labels)
output_files.extend(files)
# Video QC
run_camera_qc(self.session_path, update=True, one=self.one, cameras=labels)
return output_files
# level 1
class EphysTrials(tasks.Task):
priority = 90
level = 1
signature = {'input_files': signatures.EPHYSTRIALS, 'output_files': ()}
def _behaviour_criterion(self):
"""
Computes and updates the behaviour criterion on Alyx
"""
from brainbox.behavior import training
trials = alfio.load_object(self.session_path.joinpath("alf"), "trials")
good_enough = training.criterion_delay(
n_trials=trials["intervals"].shape[0],
perf_easy=training.compute_performance_easy(trials),
)
eid = self.one.path2eid(self.session_path, query_type='remote')
self.one.alyx.json_field_update(
"sessions", eid, "extended_qc", {"behavior": int(good_enough)}
)
def _run(self):
dsets, out_files = ephys_fpga.extract_all(self.session_path, save=True)
if not self.one or self.one.offline:
return out_files
self._behaviour_criterion()
# Run the task QC
qc = TaskQC(self.session_path, one=self.one, log=_logger)
qc.extractor = TaskQCExtractor(self.session_path, lazy=True, one=qc.one)
# Extract extra datasets required for QC
qc.extractor.data = dsets
qc.extractor.extract_data()
# Aggregate and update Alyx QC fields
qc.run(update=True)
return out_files
class EphysCellsQc(tasks.Task):
priority = 90
level = 3
def _compute_cell_qc(self, folder_probe):
"""
Computes the cell QC given an extracted probe alf path
:param folder_probe: folder
:return:
"""
# compute the straight qc
_logger.info(f"Computing cluster qc for {folder_probe}")
spikes = alfio.load_object(folder_probe, 'spikes')
clusters = alfio.load_object(folder_probe, 'clusters')
df_units, drift = ephysqc.spike_sorting_metrics(
spikes.times, spikes.clusters, spikes.amps, spikes.depths,
cluster_ids=np.arange(clusters.channels.size))
# if the ks2 labels file exist, load them and add the column
file_labels = folder_probe.joinpath('cluster_KSLabel.tsv')
if file_labels.exists():
ks2_labels = pd.read_csv(file_labels, sep='\t')
ks2_labels.rename(columns={'KSLabel': 'ks2_label'}, inplace=True)
df_units = pd.concat(
[df_units, ks2_labels['ks2_label'].reindex(df_units.index)], axis=1)
# save as parquet file
df_units.to_parquet(folder_probe.joinpath("clusters.metrics.pqt"))
return folder_probe.joinpath("clusters.metrics.pqt"), df_units, drift
def _label_probe_qc(self, folder_probe, df_units, drift):
"""
Labels the json field of the alyx corresponding probe insertion
:param folder_probe:
:param df_units:
:param drift:
:return:
"""
eid = self.one.path2eid(self.session_path, query_type='remote')
# the probe name is the first folder after alf: {session_path}/alf/{probe_name}/{spike_sorter_name}
probe_name = Path(folder_probe).relative_to(self.session_path.joinpath('alf')).parts[0]
pdict = self.one.alyx.rest('insertions', 'list', session=eid, name=probe_name, no_cache=True)
if len(pdict) != 1:
_logger.warning(f'No probe found for probe name: {probe_name}')
return
isok = df_units['label'] == 1
qcdict = {'n_units': int(df_units.shape[0]),
'n_units_qc_pass': int(np.sum(isok)),
'firing_rate_max': np.max(df_units['firing_rate'][isok]),
'firing_rate_median': np.median(df_units['firing_rate'][isok]),
'amplitude_max_uV': np.max(df_units['amp_max'][isok]) * 1e6,
'amplitude_median_uV': np.median(df_units['amp_median'][isok]) * 1e6,
'drift_rms_um': rms(drift['drift_um']),
}
file_wm = folder_probe.joinpath('_kilosort_whitening.matrix.npy')
if file_wm.exists():
wm = np.load(file_wm)
qcdict['whitening_matrix_conditioning'] = np.linalg.cond(wm)
# groom qc dict (this function will eventually go directly into the json field update)
for k in qcdict:
if isinstance(qcdict[k], np.int64):
qcdict[k] = int(qcdict[k])
elif isinstance(qcdict[k], float):
qcdict[k] = np.round(qcdict[k], 2)
self.one.alyx.json_field_update("insertions", pdict[0]["id"], "json", qcdict)
def _run(self):
"""
Post spike-sorting quality control at the cluster level.
Outputs a QC table in the clusters ALF object and labels corresponding probes in Alyx
"""
files_spikes = Path(self.session_path).joinpath('alf').rglob('spikes.times.npy')
folder_probes = [f.parent for f in files_spikes]
out_files = []
for folder_probe in folder_probes:
try:
qc_file, df_units, drift = self._compute_cell_qc(folder_probe)
out_files.append(qc_file)
self._label_probe_qc(folder_probe, df_units, drift)
except Exception:
_logger.error(traceback.format_exc())
self.status = -1
continue
return out_files
class EphysMtscomp(tasks.Task):
priority = 50 # ideally after spike sorting
level = 0
def _run(self):
"""
Compress ephys files looking for `compress_ephys.flag` within the probes folder
Original bin file will be removed
The registration flag created contains targeted file names at the root of the session
"""
out_files = []
ephys_files = spikeglx.glob_ephys_files(self.session_path)
ephys_files += spikeglx.glob_ephys_files(self.session_path, ext="ch")
ephys_files += spikeglx.glob_ephys_files(self.session_path, ext="meta")
for ef in ephys_files:
for typ in ["ap", "lf", "nidq"]:
bin_file = ef.get(typ)
if not bin_file:
continue
if bin_file.suffix.find("bin") == 1:
with spikeglx.Reader(bin_file) as sr:
if sr.is_mtscomp:
out_files.append(bin_file)
else:
_logger.info(f"Compressing binary file {bin_file}")
out_files.append(sr.compress_file(keep_original=False))
out_files.append(bin_file.with_suffix('.ch'))
else:
out_files.append(bin_file)
return out_files
class EphysDLC(tasks.Task):
gpu = 1
cpu = 4
io_charge = 90
level = 2
def _run(self):
"""empty placeholder for job creation only"""
pass
class EphysPassive(tasks.Task):
cpu = 1
io_charge = 90
level = 1
signature = {'input_files': signatures.EPHYSPASSIVE, 'output_files': ()}
def _run(self):
"""returns a list of pathlib.Paths. """
data, paths = ephys_passive.PassiveChoiceWorld(self.session_path).extract(save=True)
if any([x is None for x in paths]):
self.status = -1
# Register?
return paths
class EphysExtractionPipeline(tasks.Pipeline):
label = __name__
def __init__(self, session_path=None, **kwargs):
super(EphysExtractionPipeline, self).__init__(session_path, **kwargs)
tasks = OrderedDict()
self.session_path = session_path
# level 0
tasks["EphysRegisterRaw"] = EphysRegisterRaw(self.session_path)
tasks["EphysPulses"] = EphysPulses(self.session_path)
tasks["EphysRawQC"] = RawEphysQC(self.session_path)
tasks["EphysAudio"] = EphysAudio(self.session_path)
tasks["EphysMtscomp"] = EphysMtscomp(self.session_path)
# level 1
tasks["SpikeSorting"] = SpikeSorting(
self.session_path, parents=[tasks["EphysMtscomp"], tasks["EphysPulses"]])
tasks["EphysVideoCompress"] = EphysVideoCompress(
self.session_path, parents=[tasks["EphysPulses"]])
tasks["EphysTrials"] = EphysTrials(self.session_path, parents=[tasks["EphysPulses"]])
tasks["EphysPassive"] = EphysPassive(self.session_path, parents=[tasks["EphysPulses"]])
# level 2
tasks["EphysCellsQc"] = EphysCellsQc(self.session_path, parents=[tasks["SpikeSorting"]])
tasks["EphysDLC"] = EphysDLC(self.session_path, parents=[tasks["EphysVideoCompress"]])
self.tasks = tasks
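# Illustrative usage sketch (added for clarity, not part of the original
# module). The session path below is hypothetical; individual tasks can also
# be run directly via their _run() methods as defined above:
#
# pipeline = EphysExtractionPipeline(session_path='/data/subjects/ZM_3003/2020-07-29/001')
# print(list(pipeline.tasks.keys()))  # ordered task names, level 0 first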
|
py | b41096ac06676e7a1aab143e86d32efb8530b840 | from annoyingbus.core import AnnoyingBus
from annoyingbus.information import Information
import os
import sys
import unittest
class TestInformation(unittest.TestCase):
def test_instance(self):
f = open(os.devnull, 'w')
sys.stdout = f # Removing side-effects of print
b = AnnoyingBus()
info = b.search()
self.assertEqual(isinstance(info, Information), True)
|
py | b41097953dad8aa9c8755c25860b177cdbff5b93 | # Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data Sources are helpers to define paddle training data or testing data.
"""
from paddle.trainer.config_parser import *
from .utils import deprecated
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = ['define_py_data_sources2']
def define_py_data_source(file_list,
cls,
module,
obj,
args=None,
async=False,
data_cls=PyData):
"""
Define a python data source.
For example, the simplest usage in trainer_config.py is as follows:
.. code-block:: python
define_py_data_source("train.list", TrainData, "data_provider", "process")
Or, if you want to pass arguments from trainer_config to data_provider.py, then
.. code-block:: python
define_py_data_source("train.list", TrainData, "data_provider", "process",
args={"dictionary": dict_name})
:param data_cls:
:param file_list: file list name, which contains all data file paths
:type file_list: basestring
:param cls: Train or Test Class.
:type cls: TrainData or TestData
:param module: python module name.
:type module: basestring
:param obj: python object name. May be a function name if using
PyDataProviderWrapper.
:type obj: basestring
:param args: The best practice is using dict to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to
receive arguments.
:type args: string or picklable object
:param async: Whether to load data asynchronously.
:type async: bool
:return: None
:rtype: None
"""
if isinstance(file_list, list):
file_list_name = 'train.list'
if isinstance(cls, TestData):
file_list_name = 'test.list'
with open(file_list_name, 'w') as f:
f.writelines(file_list)
file_list = file_list_name
if not isinstance(args, basestring) and args is not None:
args = pickle.dumps(args, 0)
if data_cls is None:
def py_data2(files, load_data_module, load_data_object, load_data_args,
**kwargs):
data = DataBase()
data.type = 'py2'
data.files = files
data.load_data_module = load_data_module
data.load_data_object = load_data_object
data.load_data_args = load_data_args
data.async_load_data = True
return data
data_cls = py_data2
cls(
data_cls(
files=file_list,
load_data_module=module,
load_data_object=obj,
load_data_args=args,
async_load_data=async))
def define_py_data_sources(train_list,
test_list,
module,
obj,
args=None,
train_async=False,
data_cls=PyData):
"""
The annotation is almost the same as define_py_data_sources2, except that
it can additionally specify train_async and data_cls.
:param data_cls:
:param train_list: Train list name.
:type train_list: basestring
:param test_list: Test list name.
:type test_list: basestring
:param module: python module name. If train and test are different, then
pass a tuple or list to this argument.
:type module: basestring or tuple or list
:param obj: python object name. May be a function name if using
PyDataProviderWrapper. If train and test are different, then pass
a tuple or list to this argument.
:type obj: basestring or tuple or list
:param args: The best practice is using dict() to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to receive
arguments. If train and test are different, then pass a tuple
or list to this argument.
:type args: string or picklable object or list or tuple.
:param train_async: Whether the training data is loaded asynchronously.
:type train_async: bool
:return: None
:rtype: None
"""
def __is_splitable__(o):
return (isinstance(o, list) or
isinstance(o, tuple)) and hasattr(o, '__len__') and len(o) == 2
assert train_list is not None or test_list is not None
assert module is not None and obj is not None
test_module = module
train_module = module
if __is_splitable__(module):
train_module, test_module = module
test_obj = obj
train_obj = obj
if __is_splitable__(obj):
train_obj, test_obj = obj
if args is None:
args = ""
train_args = args
test_args = args
if __is_splitable__(args):
train_args, test_args = args
if train_list is not None:
define_py_data_source(train_list, TrainData, train_module, train_obj,
train_args, train_async, data_cls)
if test_list is not None:
define_py_data_source(test_list, TestData, test_module, test_obj,
test_args, False, data_cls)
def define_py_data_sources2(train_list, test_list, module, obj, args=None):
"""
Define python Train/Test data sources in one method. If train/test use
the same Data Provider configuration, module/obj/args contain one argument,
otherwise contain a list or tuple of arguments. For example\:
.. code-block:: python
define_py_data_sources2(train_list="train.list",
test_list="test.list",
module="data_provider"
# if train/test use different configurations,
# obj=["process_train", "process_test"]
obj="process",
args={"dictionary": dict_name})
The related data provider can refer to
`here <../../data_provider/pydataprovider2.html#dataprovider-for-the-sequential-model>`__.
:param train_list: Train list name.
:type train_list: basestring
:param test_list: Test list name.
:type test_list: basestring
:param module: python module name. If train and test are different, then
pass a tuple or list to this argument.
:type module: basestring or tuple or list
:param obj: python object name. May be a function name if using
PyDataProviderWrapper. If train and test are different, then pass
a tuple or list to this argument.
:type obj: basestring or tuple or list
:param args: The best practice is using dict() to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to receive
arguments. If train and test are different, then pass a tuple
or list to this argument.
:type args: string or picklable object or list or tuple.
:return: None
:rtype: None
"""
define_py_data_sources(
train_list=train_list,
test_list=test_list,
module=module,
obj=obj,
args=args,
data_cls=None)
|
py | b41098231c52ebf4c3f9e7de0408534aeb807f06 | import parser
import CABR
# Sloppy Code but necessary in the circumstance
# Sets criterion for thresholding
criterion = 0.25  # module-level assignment, so it is global to this script
abr = CABR.ABR(path='/Users/cx926/Desktop/CABR/Chunjie/', file_regex='ABR-*', ParsingClass=parser.RawABRthreshold)
# # abr.write.agf()
# # abr.write.threshold()
# # abr.get_experiment()
abr.plot.threshold(seperate_conditions=True, errbar = False)
# abr.plot.agf(8)
p = parser.RawABRthreshold('/Users/cx926/Desktop/CABR/Chunjie/Pre/57/ABR-70157-1')
# p.save_figure(None)
|
py | b41099b53d90bd7fefeecb631c345eb0da56e871 | from ui_tests.exporter.pages.BasePage import BasePage
class ApplicationEditTypePage(BasePage):
MAJOR_EDITS_RADIO_BUTTON = "edit-type-major"
CHANGE_APPLICATION_BTN = '.govuk-button[value="submit"]'
def click_major_edits_radio_button(self):
self.driver.find_element_by_id(self.MAJOR_EDITS_RADIO_BUTTON).click()
def click_change_application_button(self):
self.driver.find_element_by_css_selector(self.CHANGE_APPLICATION_BTN).click()
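# Illustrative usage sketch (added for clarity, not part of the original
# module). It assumes BasePage is constructed with a Selenium driver, as the
# methods above imply:
#
# page = ApplicationEditTypePage(driver)
# page.click_major_edits_radio_button()
# page.click_change_application_button()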
|
py | b4109a0584576b0f7353979c5a6f610e91732cb9 | # GUI Application automation and testing library
# Copyright (C) 2006-2017 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base class for all wrappers in all backends"""
from __future__ import unicode_literals
from __future__ import print_function
import abc
import ctypes
import locale
import re
import time
import win32process
import six
try:
from PIL import ImageGrab
except ImportError:
ImageGrab = None
from . import keyboard
from . import win32defines, win32structures, win32functions
from .timings import Timings
from .actionlogger import ActionLogger
from .mouse import _perform_click_input
#=========================================================================
def remove_non_alphanumeric_symbols(s):
"""Make text usable for attribute name"""
return re.sub("\W", "_", s)
#=========================================================================
class InvalidElement(RuntimeError):
"""Raises when an invalid element is passed"""
pass
#=========================================================================
class ElementNotEnabled(RuntimeError):
"""Raised when an element is not enabled"""
pass
#=========================================================================
class ElementNotVisible(RuntimeError):
"""Raised when an element is not visible"""
pass
#=========================================================================
@six.add_metaclass(abc.ABCMeta)
class BaseMeta(abc.ABCMeta):
"""Abstract metaclass for Wrapper objects"""
@staticmethod
def find_wrapper(element):
"""Abstract static method to find an appropriate wrapper"""
raise NotImplementedError()
#=========================================================================
@six.add_metaclass(BaseMeta)
class BaseWrapper(object):
"""
Abstract wrapper for elements.
All other wrappers are derived from this.
"""
# Properties required for _MetaWrapper class
friendlyclassname = None
windowclasses = []
# Properties that describe type of the element
can_be_label = False
has_title = True
#------------------------------------------------------------
def __new__(cls, element_info):
return BaseWrapper._create_wrapper(cls, element_info, BaseWrapper)
#------------------------------------------------------------
@staticmethod
def _create_wrapper(cls_spec, element_info, myself):
"""Create a wrapper object according to the specified element info"""
# only use the meta class to find the wrapper for BaseWrapper
# so allow users to force the wrapper if they want
if cls_spec != myself:
obj = object.__new__(cls_spec)
obj.__init__(element_info)
return obj
new_class = cls_spec.find_wrapper(element_info)
obj = object.__new__(new_class)
obj.__init__(element_info)
return obj
#------------------------------------------------------------
def __init__(self, element_info, active_backend):
"""
Initialize the element
* **element_info** is an instance of int or of one of the ElementInfo subclasses
"""
self.backend = active_backend
if element_info:
#if isinstance(element_info, six.integer_types):
# element_info = self.backend.element_info_class(element_info)
self._element_info = element_info
self.handle = self._element_info.handle
self._as_parameter_ = self.handle
self.ref = None
self.appdata = None
self._cache = {}
self.actions = ActionLogger()
else:
raise RuntimeError('NULL pointer was used to initialize BaseWrapper')
def __repr__(self):
"""Representation of the wrapper object
The method prints the following info:
* type name as a module name and a class name of the object
* title of the control or empty string
* friendly class name of the control
* unique ID of the control calculated as a hash value from a backend specific ID.
Notice that the reported title and class name can be used as hints to prepare
a windows specification to access the control, while the unique ID is more for
debugging purposes helping to distinguish between the runtime objects.
"""
return '<{0}, {1}>'.format(self.__str__(), self.__hash__())
def __str__(self):
"""Pretty print representation of the wrapper object
The method prints the following info:
* type name as a module name and class name of the object
* title of the wrapped control or empty string
* friendly class name of the wrapped control
Notice that the reported title and class name can be used as hints
to prepare a windows specification to access the control
"""
module = self.__class__.__module__
module = module[module.rfind('.') + 1:]
type_name = module + "." + self.__class__.__name__
try:
title = self.texts()[0]
except IndexError:
title = ""
class_name = self.friendly_class_name()
return "{0} - '{1}', {2}".format(type_name, title, class_name)
def __hash__(self):
"""Returns the hash value of the handle"""
# Must be implemented in a sub-class
raise NotImplementedError()
#------------------------------------------------------------
@property
def writable_props(self):
"""
Build the list of the default properties to be written.
Derived classes may override or extend this list depending
on how much control they need.
"""
props = ['class_name',
'friendly_class_name',
'texts',
'control_id',
'rectangle',
'is_visible',
'is_enabled',
'control_count',
]
return props
#------------------------------------------------------------
@property
def _needs_image_prop(self):
"""Specify whether we need to grab an image of ourselves when asked
for properties"""
return False
#------------------------------------------------------------
@property
def element_info(self):
"""Read-only property to get **ElementInfo** object"""
return self._element_info
#------------------------------------------------------------
def friendly_class_name(self):
"""
Return the friendly class name for the control
This differs from the class of the control in some cases.
class_name() is the actual 'Registered' element class of the control
while friendly_class_name() is hopefully something that will make
more sense to the user.
For example Checkboxes are implemented as Buttons - so the class
of a CheckBox is "Button" - but the friendly class is "CheckBox"
"""
if self.friendlyclassname is None:
self.friendlyclassname = self.element_info.class_name
return self.friendlyclassname
#------------------------------------------------------------
def class_name(self):
"""Return the class name of the elenemt"""
return self.element_info.class_name
#------------------------------------------------------------
def window_text(self):
"""
Window text of the element
Quite a few controls have other text that is visible, for example
Edit controls usually have an empty string for window_text but still
have text displayed in the edit window.
"""
return self.element_info.rich_text
#------------------------------------------------------------
def control_id(self):
"""
Return the ID of the element
Only controls have a valid ID - dialogs usually have no ID assigned.
The ID usually identifies the control in the window - but there can
be duplicate IDs; for example, labels in a dialog may have duplicate
IDs.
"""
return self.element_info.control_id
#------------------------------------------------------------
def is_visible(self):
"""
Whether the element is visible or not
Checks that both the top level parent (probably dialog) that
owns this element and the element itself are both visible.
If you want to wait for an element to become visible (or wait
for it to become hidden) use ``Application.wait('visible')`` or
``Application.wait_not('visible')``.
If you want to raise an exception immediately if an element is
not visible then you can use the BaseWrapper.verify_visible().
BaseWrapper.verify_actionable() raises if the element is not both
visible and enabled.
"""
return self.element_info.visible #and self.top_level_parent().element_info.visible
#------------------------------------------------------------
def is_enabled(self):
"""
Whether the element is enabled or not
Checks that both the top level parent (probably dialog) that
owns this element and the element itself are both enabled.
If you want to wait for an element to become enabled (or wait
for it to become disabled) use ``Application.wait('enabled')`` or
``Application.wait_not('enabled')``.
If you want to raise an exception immediately if an element is
not enabled then you can use the BaseWrapper.verify_enabled().
BaseWrapper.verify_actionable() raises if the element is not both
visible and enabled.
"""
return self.element_info.enabled #and self.top_level_parent().element_info.enabled
#------------------------------------------------------------
def rectangle(self):
"""
Return the rectangle of element
The rectangle() is the rectangle of the element on the screen.
Coordinates are given from the top left of the screen.
This method returns a RECT structure, Which has attributes - top,
left, right, bottom. and has methods width() and height().
See win32structures.RECT for more information.
"""
return self.element_info.rectangle
#------------------------------------------------------------
def client_to_screen(self, client_point):
"""Maps point from client to screen coordinates"""
# Use a direct call to element_info.rectangle instead of self.rectangle
# because the latter can be overriden in one of derived wrappers
# (see _treeview_element.rectangle or _listview_item.rectangle)
rect = self.element_info.rectangle
if isinstance(client_point, win32structures.POINT):
return (client_point.x + rect.left, client_point.y + rect.top)
else:
return (client_point[0] + rect.left, client_point[1] + rect.top)
#-----------------------------------------------------------
def process_id(self):
"Return the ID of process that owns this window"
return self.element_info.process_id
#-----------------------------------------------------------
def is_dialog(self):
"Return true if the control is a top level window"
if self.parent():
return self == self.top_level_parent()
else:
return False
#-----------------------------------------------------------
def parent(self):
"""
Return the parent of this element
Note that the parent of a control is not necessarily a dialog or
other main window. A group box may be the parent of some radio
buttons for example.
To get the main (or top level) window then use
BaseWrapper.top_level_parent().
"""
parent_elem = self.element_info.parent
if parent_elem:
return self.backend.generic_wrapper_class(parent_elem)
else:
return None
#-----------------------------------------------------------
def root(self):
"Return wrapper for root element (desktop)"
return self.backend.generic_wrapper_class(self.backend.element_info_class())
#-----------------------------------------------------------
def top_level_parent(self):
"""
Return the top level window of this control
The TopLevel parent is different from the parent in that the parent
is the element that owns this element - but it may not be a dialog/main
window. For example most Comboboxes have an Edit. The ComboBox is the
parent of the Edit control.
This will always return a valid window element (if the control has
no top level parent then the control itself is returned - as it is
a top level window already!)
"""
if not ("top_level_parent" in self._cache.keys()):
parent = self.parent()
if parent:
if self.parent() == self.root():
self._cache["top_level_parent"] = self
else:
return self.parent().top_level_parent()
else:
self._cache["top_level_parent"] = self
return self._cache["top_level_parent"]
#-----------------------------------------------------------
def texts(self):
"""
Return the text for each item of this control
It is a list of strings for the control. It is frequently overridden
to extract all strings from a control with multiple items.
It is always a list with one or more strings:
* The first element is the window text of the control
* Subsequent elements contain the text of any items of the
control (e.g. items in a listbox/combobox, tabs in a tabcontrol)
"""
texts_list = [self.window_text(), ]
return texts_list
#-----------------------------------------------------------
def children(self, **kwargs):
"""
Return the children of this element as a list
It returns a list of BaseWrapper (or subclass) instances.
An empty list is returned if there are no children.
"""
child_elements = self.element_info.children(**kwargs)
return [self.backend.generic_wrapper_class(element_info) for element_info in child_elements]
#-----------------------------------------------------------
def descendants(self, **kwargs):
"""
Return the descendants of this element as a list
It returns a list of BaseWrapper (or subclass) instances.
An empty list is returned if there are no descendants.
"""
desc_elements = self.element_info.descendants(**kwargs)
return [self.backend.generic_wrapper_class(element_info) for element_info in desc_elements]
#-----------------------------------------------------------
def control_count(self):
"Return the number of children of this control"
return len(self.element_info.children(process=self.process_id()))
#-----------------------------------------------------------
def capture_as_image(self, rect=None):
"""
Return a PIL image of the control.
See PIL documentation to know what you can do with the resulting
image.
"""
control_rectangle = self.rectangle()
if not (control_rectangle.width() and control_rectangle.height()):
return None
# PIL is optional so check first
if not ImageGrab:
print("PIL does not seem to be installed. "
"PIL is required for capture_as_image")
self.actions.log("PIL does not seem to be installed. "
"PIL is required for capture_as_image")
return None
# get the control rectangle in a way that PIL likes it
if rect:
box = (rect.left, rect.top, rect.right, rect.bottom)
else:
box = (control_rectangle.left,
control_rectangle.top,
control_rectangle.right,
control_rectangle.bottom)
# grab the image and get raw data as a string
return ImageGrab.grab(box)
#-----------------------------------------------------------
def get_properties(self):
"""Return the properties of the control as a dictionary."""
props = {}
# for each of the properties that can be written out
for propname in self.writable_props:
# set the item in the props dictionary keyed on the propname
props[propname] = getattr(self, propname)()
if self._needs_image_prop:
props["image"] = self.capture_as_image()
return props
#-----------------------------------------------------------
def draw_outline(
self,
colour='green',
thickness=2,
fill=win32defines.BS_NULL,
rect=None):
"""
Draw an outline around the window.
* **colour** can be either an integer or one of 'red', 'green', 'blue'
(default 'green')
* **thickness** thickness of rectangle (default 2)
* **fill** how to fill in the rectangle (default BS_NULL)
* **rect** the coordinates of the rectangle to draw (defaults to
the rectangle of the control)
"""
# don't draw if dialog is not visible
if not self.is_visible():
return
colours = {
"green": 0x00ff00,
"blue": 0xff0000,
"red": 0x0000ff,
}
# if it's a known colour
if colour in colours:
colour = colours[colour]
if rect is None:
rect = self.rectangle()
# create the pen(outline)
pen_handle = win32functions.CreatePen(
win32defines.PS_SOLID, thickness, colour)
# create the brush (inside)
brush = win32structures.LOGBRUSH()
brush.lbStyle = fill
brush.lbHatch = win32defines.HS_DIAGCROSS
brush_handle = win32functions.CreateBrushIndirect(ctypes.byref(brush))
# get the Device Context
dc = win32functions.CreateDC("DISPLAY", None, None, None )
# push our objects into it
win32functions.SelectObject(dc, brush_handle)
win32functions.SelectObject(dc, pen_handle)
# draw the rectangle to the DC
win32functions.Rectangle(
dc, rect.left, rect.top, rect.right, rect.bottom)
# Delete the brush and pen we created
win32functions.DeleteObject(brush_handle)
win32functions.DeleteObject(pen_handle)
# delete the Display context that we created
win32functions.DeleteDC(dc)
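# Illustrative usage sketch (added for clarity, not part of the original
# module); `wrapper` stands for any concrete wrapper derived from this class:
# wrapper.draw_outline(colour='red', thickness=3)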
#-----------------------------------------------------------
def is_child(self, parent):
"""
Return True if this element is a child of 'parent'.
An element is a child of another element when it is a direct descendant
of the other element, i.e. the other element is the immediate parent in
the chain of parent elements for the child element.
"""
return self in parent.children(class_name = self.class_name())
#-----------------------------------------------------------
def __eq__(self, other):
"Returns true if 2 BaseWrapper's describe 1 actual element"
if hasattr(other, "element_info"):
return self.element_info == other.element_info
else:
return self.element_info == other
#-----------------------------------------------------------
def __ne__(self, other):
"Returns False if the elements described by 2 BaseWrapper's are different"
return not self == other
#-----------------------------------------------------------
def verify_actionable(self):
"""
Verify that the element is both visible and enabled
Raise either ElementNotEnabled or ElementNotVisible if not
enabled or visible respectively.
"""
self.wait_for_idle()
self.verify_visible()
self.verify_enabled()
#-----------------------------------------------------------
def verify_enabled(self):
"""
Verify that the element is enabled
Check first if the element's parent is enabled (skip if no parent),
then check if element itself is enabled.
"""
if not self.is_enabled():
raise ElementNotEnabled()
#-----------------------------------------------------------
def verify_visible(self):
"""
Verify that the element is visible
Check first if the element's parent is visible. (skip if no parent),
then check if element itself is visible.
"""
if not self.is_visible():
raise ElementNotVisible()
#-----------------------------------------------------------
def click_input(
self,
button = "left",
coords = (None, None),
button_down = True,
button_up = True,
double = False,
wheel_dist = 0,
use_log = True,
pressed = "",
absolute = False,
key_down = True,
key_up = True):
"""Click at the specified coordinates
* **button** The mouse button to click. One of 'left', 'right',
'middle' or 'x' (Default: 'left', 'move' is a special case)
* **coords** The coordinates to click at.(Default: the center of the control)
* **double** Whether to perform a double click or not (Default: False)
* **wheel_dist** The distance to move the mouse wheel (default: 0)
NOTES:
This is different from click method in that it requires the control
to be visible on the screen but performs a more realistic 'click'
simulation.
This method is also vulnerable if the mouse is moved by the user
as that could easily move the mouse off the control before the
click_input has finished.
"""
if self.is_dialog():
self.set_focus()
if isinstance(coords, win32structures.RECT):
coords = coords.mid_point()
# allow points objects to be passed as the coords
elif isinstance(coords, win32structures.POINT):
coords = [coords.x, coords.y]
else:
coords = list(coords)
# set the default coordinates
if coords[0] is None:
coords[0] = int(self.rectangle().width() / 2)
if coords[1] is None:
coords[1] = int(self.rectangle().height() / 2)
if not absolute:
coords = self.client_to_screen(coords)
message = None
if use_log:
ctrl_text = self.window_text()
if ctrl_text is None:
ctrl_text = six.text_type(ctrl_text)
if button.lower() == 'move':
message = 'Moved mouse over ' + self.friendly_class_name() + \
' "' + ctrl_text + '" to screen point ('
else:
message = 'Clicked ' + self.friendly_class_name() + ' "' + ctrl_text + \
'" by ' + str(button) + ' button mouse click at '
if double:
message = 'Double-c' + message[1:]
message += str(tuple(coords))
_perform_click_input(button, coords, double, button_down, button_up,
wheel_dist=wheel_dist, pressed=pressed,
key_down=key_down, key_up=key_up)
if message:
self.actions.log(message)
#-----------------------------------------------------------
def double_click_input(self, button ="left", coords = (None, None)):
"""Double click at the specified coordinates"""
self.click_input(button, coords, double=True)
#-----------------------------------------------------------
def right_click_input(self, coords = (None, None)):
"""Right click at the specified coords"""
self.click_input(button='right', coords=coords)
#-----------------------------------------------------------
def press_mouse_input(
self,
button = "left",
coords = (None, None),
pressed = "",
absolute = True,
key_down = True,
key_up = True
):
"""Press a mouse button using SendInput"""
self.click_input(
button=button,
coords=coords,
button_down=True,
button_up=False,
pressed=pressed,
absolute=absolute,
key_down=key_down,
key_up=key_up
)
#-----------------------------------------------------------
def release_mouse_input(
self,
button = "left",
coords = (None, None),
pressed = "",
absolute = True,
key_down = True,
key_up = True
):
"""Release the mouse button"""
self.click_input(
button,
coords,
button_down=False,
button_up=True,
pressed=pressed,
absolute=absolute,
key_down=key_down,
key_up=key_up
)
#-----------------------------------------------------------
def move_mouse_input(self, coords=(0, 0), pressed="", absolute=True):
"""Move the mouse"""
if not absolute:
self.actions.log('Moving mouse to relative (client) coordinates ' + str(coords).replace('\n', ', '))
self.click_input(button='move', coords=coords, absolute=absolute, pressed=pressed)
self.wait_for_idle()
return self
# -----------------------------------------------------------
def _calc_click_coords(self):
"""A helper that tries to get click coordinates of the control
The calculated coordinates are absolute and returned as
a tuple with x and y values.
"""
coords = self.rectangle().mid_point()
return (coords.x, coords.y)
# -----------------------------------------------------------
def drag_mouse_input(self,
dst=(0, 0),
src=None,
button="left",
pressed="",
absolute=True):
"""Click on **src**, drag it and drop on **dst**
* **dst** is a destination wrapper object or just coordinates.
* **src** is a source wrapper object or coordinates.
If **src** is None the self is used as a source object.
* **button** is a mouse button to hold during the drag.
It can be "left", "right", "middle" or "x"
* **pressed** is a key on the keyboard to press during the drag.
* **absolute** specifies whether to use absolute coordinates
for the mouse pointer locations
"""
if not src:
src = self
if dst is src:
raise AttributeError("Can't drag-n-drop on itself")
if isinstance(src, BaseWrapper):
press_coords = src._calc_click_coords()
elif isinstance(src, win32structures.POINT):
press_coords = (src.x, src.y)
else:
press_coords = src
if isinstance(dst, BaseWrapper):
release_coords = dst._calc_click_coords()
elif isinstance(dst, win32structures.POINT):
release_coords = (dst.x, dst.y)
else:
release_coords = dst
self.actions.log('Drag mouse from coordinates {0} to {1}'.format(press_coords, release_coords))
self.press_mouse_input(button, press_coords, pressed, absolute=absolute)
time.sleep(Timings.before_drag_wait)
for i in range(5):
self.move_mouse_input((press_coords[0] + i, press_coords[1]), pressed=pressed, absolute=absolute) # "left"
time.sleep(Timings.drag_n_drop_move_mouse_wait)
self.move_mouse_input(release_coords, pressed=pressed, absolute=absolute) # "left"
time.sleep(Timings.before_drop_wait)
self.release_mouse_input(button, release_coords, pressed, absolute=absolute)
time.sleep(Timings.after_drag_n_drop_wait)
return self
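# Illustrative usage sketch (added for clarity, not part of the original
# module); `src_ctrl` and `dst_ctrl` stand for two concrete wrapper objects:
# src_ctrl.drag_mouse_input(dst=dst_ctrl, button="left", pressed="", absolute=True)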
#-----------------------------------------------------------
def wheel_mouse_input(self, coords = (None, None), wheel_dist = 1, pressed =""):
"""Do mouse wheel"""
self.click_input(button='wheel', coords=coords, wheel_dist=wheel_dist, pressed=pressed)
return self
#-----------------------------------------------------------
def wait_for_idle(self):
"""Backend specific function to wait for idle state of a thread or a window"""
pass # do nothing by default
# TODO: implement wait_for_idle for backend="uia"
#-----------------------------------------------------------
def type_keys(
self,
keys,
pause = None,
with_spaces = False,
with_tabs = False,
with_newlines = False,
turn_off_numlock = True,
set_foreground = True):
"""
Type keys to the element using keyboard.SendKeys
This uses the re-written keyboard_ python module where you can
find documentation on what to use for the **keys**.
.. _keyboard: pywinauto.keyboard.html
"""
self.verify_actionable()
friendly_class_name = self.friendly_class_name()
if pause is None:
pause = Timings.after_sendkeys_key_wait
if set_foreground:
self.set_focus()
# attach the Python process with the process that self is in
if self.element_info.handle:
window_thread_id, _ = win32process.GetWindowThreadProcessId(int(self.handle))
win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), window_thread_id, win32defines.TRUE)
# TODO: check return value of AttachThreadInput properly
else:
# TODO: UIA stuff maybe
pass
if isinstance(keys, six.text_type):
aligned_keys = keys
elif isinstance(keys, six.binary_type):
aligned_keys = keys.decode(locale.getpreferredencoding())
else:
# convert a non-string input
aligned_keys = six.text_type(keys)
# Play the keys to the active window
keyboard.SendKeys(
aligned_keys,
pause,
with_spaces,
with_tabs,
with_newlines,
turn_off_numlock)
# detach the python process from the window's process
if self.element_info.handle:
win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), window_thread_id, win32defines.FALSE)
# TODO: check return value of AttachThreadInput properly
else:
# TODO: UIA stuff
pass
self.wait_for_idle()
self.actions.log('Typed text to the ' + friendly_class_name + ': ' + aligned_keys)
return self
#-----------------------------------------------------------
def set_focus(self):
"""Set the focus to this element"""
pass
#====================================================================
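# Illustrative end-to-end sketch (added for clarity, not part of the original
# module). Concrete wrappers are normally reached through pywinauto's
# Application object rather than instantiated directly; "notepad.exe" is an
# arbitrary example target:
#
# from pywinauto.application import Application
# app = Application(backend="win32").start("notepad.exe")
# dlg = app.top_window()
# dlg.type_keys("hello world", with_spaces=True)
# dlg.capture_as_image()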
|
py | b4109a2f3d812efb67e8db5136667bca203281b4 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import re
tools_url = 'https://github.com/ROCm-Developer-Tools'
compute_url = 'https://github.com/RadeonOpenCompute'
aomp = [
"e4526489833896bbc47ba865e0d115fab278ce269789a8c99a97f444595f5f6a",
"970374c3acb9dda8b9a17d7a579dbaab48fac731db8fdce566a65abee37e5ed3",
"86f90d6505eccdb2840069cadf57f7111d4685653c4974cf65fb22b172e55478",
"14fc6867af0b17e3bff8cb42cb36f509c95a29b7a933a106bf6778de21f6c123",
"ce29cead5391a4a13f2c567e2e059de9291888d24985460725e43a91b740be7a"
]
devlib = [
"dce3a4ba672c4a2da4c2260ee4dc96ff6dd51877f5e7e1993cb107372a35a378",
"b3a114180bf184b3b829c356067bc6a98021d52c1c6f9db6bc57272ebafc5f1d",
"e82cc9a8eb7d92de02cabb856583e28f17a05c8cf9c97aec5275608ef1a38574",
"c99f45dacf5967aef9a31e3731011b9c142446d4a12bac69774998976f2576d7",
"bca9291385d6bdc91a8b39a46f0fd816157d38abb1725ff5222e6a0daa0834cc"
]
llvm = [
"b4fd7305dc57887eec17cce77bbf42215db46a4a3d14d8e517ab92f4e200b29d",
"89b967de5e79f6df7c62fdc12529671fa30989ae7b634d5a7c7996629ec1140e",
"98deabedb6cb3067ee960a643099631902507f236e4d9dc65b3e0f8d659eb55c",
"f0a0b9fec0626878340a15742e73a56f155090011716461edcb069dcf05e6b30",
"3ff18a8bd31d5b55232327e574dfa3556cf26787e105d0ba99411c5687325a8d"
]
flang = [
"cc27f8bfb49257b7a4f0b03f4ba5e06a28dcb6c337065c4201b6075dd2d5bc48",
"1fe07a0da20eb66a2a2aa8d354bf95c6f216ec38cc4a051e98041e0d13c34b36",
"54cc6a9706dba6d7808258632ed40fa6493838edb309709d3b25e0f9b02507f8",
"43d57bcc87fab092ac242e36da62588a87b6fa91f9e81fdb330159497afdecb3",
"81674bf3c9d8fd9b16fb3e5c66a870537c25ff8302fc1b162ab9e95944167163"
]
extras = [
"5dbf27f58b8114318208b97ba99a90483b78eebbcad4117cac6881441977e855",
"adaf7670b2497ff3ac09636e0dd30f666a5a5b742ecdcb8551d722102dcfbd85",
"4460a4f4b03022947f536221483e85dcd9b07064a54516ec103a1939c3f587b5",
"014fca1fba54997c6db0e84822df274fb6807698b6856da4f737f38f10ab0e5d",
"ee146cff4b9ee7aae90d7bb1d6b4957839232be0e7dab1865e0ae39832f8f795"
]
# Used only for 3.5.0
hip = [
"86eb7749ff6f6c5f6851cd6c528504d42f9286967324a50dd0dd54a6a74cacc7"
]
vdi = [
"b21866c7c23dc536356db139b88b6beb3c97f58658836974a7fc167feb31ad7f"
]
opencl = [
"8963fcd5a167583b3db8b94363778d4df4593bfce8141e1d3c32a59fb64a0cf6"
]
versions = ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0']
versions_dict = dict()
hashes = [aomp, devlib, llvm, flang, extras]
hashes_35 = [aomp, devlib, llvm, flang, extras, hip, vdi, opencl]
components = ['aomp', 'devlib', 'llvm', 'flang', 'extras']
components_35 = [
'aomp', 'devlib', 'llvm', 'flang', 'extras', 'hip', 'vdi', 'opencl'
]
for outer_index, item in enumerate(versions):
if item == '3.5.0':
use_components = components_35
use_hashes = hashes_35
else:
use_components = components
use_hashes = hashes
for inner_index, component in enumerate(use_hashes):
versions_dict.setdefault(item, {})[use_components[inner_index]] = \
use_hashes[inner_index][outer_index]
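# Illustrative note (not part of the original recipe): the loop above pairs each
# release in `versions` with the component hashes listed earlier by index, so
# for example:
#
#     versions_dict['3.10.0']['aomp']  == aomp[4]
#     versions_dict['3.5.0']['opencl'] == opencl[0]   # hip/vdi/opencl exist only for 3.5.0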
class Aomp(Package):
"""llvm openmp compiler from AMD."""
homepage = tools_url + "/aomp"
url = tools_url + "/aomp/archive/rocm-3.10.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala', 'estewart08']
version('3.10.0', sha256=versions_dict['3.10.0']['aomp'])
version('3.9.0', sha256=versions_dict['3.9.0']['aomp'])
version('3.8.0', sha256=versions_dict['3.8.0']['aomp'])
version('3.7.0', sha256=versions_dict['3.7.0']['aomp'])
version('3.5.0', sha256=versions_dict['3.5.0']['aomp'])
    # CMake above 3.18 would fail the build on 3.5.0
depends_on('cmake@3:', type='build')
depends_on('cmake@3:3.17', when='@3.5.0', type='build')
# Python 2 is needed for 3.5.0 and 3.8.0, limit py-setuptools
# to avoid spec error
depends_on('[email protected]:2.8', when='@3.5.0:3.8.0', type='build')
depends_on('py-setuptools@:44', when='@3.5.0:3.8.0',
type='build')
depends_on('python@3:', type='build', when='@3.9.0:')
depends_on('py-setuptools', when='@3.9.0:', type='build')
depends_on('[email protected]:', type=('build', 'link'))
depends_on('py-pip', when='@3.8.0:', type='build')
depends_on('py-wheel', when='@3.8.0:', type=('build', 'run'))
depends_on('perl-data-dumper', type='build')
depends_on('awk', type='build')
depends_on('elfutils', type=('build', 'link'))
depends_on('libffi', type=('build', 'link'))
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0']:
depends_on('hsakmt-roct@' + ver, type=('build', 'run'), when='@' + ver)
depends_on('comgr@' + ver, type='build', when='@' + ver)
depends_on('hsa-rocr-dev@' + ver, type=('build', 'run'),
when='@' + ver)
depends_on('rocm-device-libs@' + ver, type=('build', 'run'),
when='@' + ver)
if ver != '3.5.0':
depends_on('hip@' + ver, type=('build', 'run'), when='@' + ver)
depends_on('hip-rocclr@' + ver, type='build', when='@' + ver)
if ver == '3.9.0' or ver == '3.10.0':
depends_on('rocm-gdb@' + ver, type=('build', 'run'),
when='@' + ver)
resource(
name='rocm-device-libs',
url=compute_url +
'/ROCm-Device-Libs/archive/rocm-' + ver + '.tar.gz',
sha256=versions_dict[ver]['devlib'],
expand=True,
destination='aomp-dir',
placement='rocm-device-libs',
when='@' + ver)
resource(
name='amd-llvm-project',
url=tools_url + '/amd-llvm-project/archive/rocm-' + ver
+ '.tar.gz',
sha256=versions_dict[ver]['llvm'],
expand=True,
destination='aomp-dir',
placement='amd-llvm-project',
when='@' + ver)
resource(
name='flang',
url=tools_url + '/flang/archive/rocm-' + ver + '.tar.gz',
sha256=versions_dict[ver]['flang'],
expand=True,
destination='aomp-dir',
placement='flang',
when='@' + ver)
resource(
name='aomp-extras',
url=tools_url + '/aomp-extras/archive/rocm-' + ver + '.tar.gz',
sha256=versions_dict[ver]['extras'],
expand=True,
destination='aomp-dir',
placement='aomp-extras',
when='@' + ver)
if ver == '3.5.0':
resource(
name='hip-on-vdi',
url=tools_url + '/hip/archive/aomp-3.5.0.tar.gz',
sha256=versions_dict['3.5.0']['hip'],
expand=True,
destination='aomp-dir',
placement='hip-on-vdi',
when='@3.5.0')
resource(
name='vdi',
url=tools_url + '/rocclr/archive/aomp-3.5.0.tar.gz',
sha256=versions_dict['3.5.0']['vdi'],
expand=True,
destination='aomp-dir',
placement='vdi',
when='@3.5.0')
resource(
name='opencl-on-vdi',
sha256=versions_dict['3.5.0']['opencl'],
url=compute_url +
'/ROCm-OpenCL-Runtime/archive/aomp-3.5.0.tar.gz',
expand=True,
destination='aomp-dir',
placement='opencl-on-vdi',
when='@3.5.0')
# Copy source files over for debug build in 3.9.0
patch('0001-Add-cmake-option-for-copying-source-for-debugging.patch',
working_dir='aomp-dir/amd-llvm-project', when='@3.9.0:')
# Revert back to .amdgcn.bc naming scheme for 3.8.0
patch('0001-Add-amdgcn-to-devicelibs-bitcode-names-3.8.patch',
working_dir='aomp-dir/amd-llvm-project', when='@3.8.0')
# Revert back to .amdgcn.bc naming scheme for 3.7.0
patch('0001-Add-amdgcn-to-devicelibs-bitcode-names.patch',
working_dir='aomp-dir/amd-llvm-project', when='@3.7.0')
def patch(self):
# Make sure python2.7 is used for the generation of hip header
if self.spec.version == Version('3.5.0'):
kwargs = {'ignore_absent': False, 'backup': False, 'string': False}
with working_dir('aomp-dir/hip-on-vdi'):
match = '^#!/usr/bin/python'
python = self.spec['python'].command.path
substitute = "#!{python}".format(python=python)
files = [
'hip_prof_gen.py', 'vdi/hip_prof_gen.py'
]
filter_file(match, substitute, *files, **kwargs)
src = self.stage.source_path
libomptarget = '{0}/aomp-dir/amd-llvm-project/openmp/libomptarget'
aomp_extras = '{0}/aomp-dir/aomp-extras/aomp-device-libs'
flang = '{0}/aomp-dir/flang/'
if self.spec.version >= Version('3.9.0'):
filter_file(
'ADDITIONAL_VERSIONS 2.7', 'ADDITIONAL_VERSIONS 3',
flang.format(src) + 'CMakeLists.txt')
if self.spec.version >= Version('3.8.0'):
filter_file(
'{CMAKE_INSTALL_PREFIX}', '{HSA_INCLUDE}',
libomptarget.format(src) + '/hostrpc/services/CMakeLists.txt')
filter_file(
'CONFIG',
'CONFIG PATHS ${CMAKE_INSTALL_PREFIX} NO_DEFAULT_PATH',
libomptarget.format(src) + '/../libompd/test/CMakeLists.txt')
if self.spec.version != Version('3.5.0'):
filter_file(
'{ROCM_DIR}/aomp/amdgcn/bitcode', '{DEVICE_LIBS_DIR}',
libomptarget.format(src) + '/hostrpc/CMakeLists.txt',
libomptarget.format(src) + '/deviceRTLs/amdgcn/CMakeLists.txt')
if self.spec.version == Version('3.5.0'):
filter_file(
'{ROCM_DIR}/lib/bitcode', '{DEVICE_LIBS_DIR}',
libomptarget.format(src) +
'/deviceRTLs/hostcall/CMakeLists.txt')
filter_file(
'{ROCM_DIR}/lib/bitcode', '{DEVICE_LIBS_DIR}',
aomp_extras.format(src) + '/aompextras/CMakeLists.txt',
aomp_extras.format(src) + '/libm/CMakeLists.txt',
libomptarget.format(src) + '/deviceRTLs/amdgcn/CMakeLists.txt',
string=True)
filter_file(
r'${ROCM_DIR}/hsa/include ${ROCM_DIR}/hsa/include/hsa',
'${HSA_INCLUDE}/hsa/include ${HSA_INCLUDE}/hsa/include/hsa',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt',
string=True)
filter_file(
'{ROCM_DIR}/hsa/lib', '{HSA_LIB}',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt')
filter_file(
r'{ROCM_DIR}/lib\)',
'{HSAKMT_LIB})\nset(HSAKMT_LIB64 ${HSAKMT_LIB64})',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt')
filter_file(
r'-L${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS}',
'-L${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS} -L${HSAKMT_LIB64}',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt',
string=True)
filter_file(
r'-rpath,${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS}',
'-rpath,${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS}' +
',-rpath,${HSAKMT_LIB64}',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt',
string=True)
filter_file(
'{ROCM_DIR}/include', '{COMGR_INCLUDE}',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt')
filter_file(
'{ROCM_DIR}/include', '{COMGR_INCLUDE}',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt')
filter_file(
r'-L${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX}',
'-L${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX} -L${COMGR_LIB}',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt',
string=True)
filter_file(
r'rpath,${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX}',
'rpath,${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX}' +
'-Wl,-rpath,${COMGR_LIB}',
libomptarget.format(src) + '/plugins/hsa/CMakeLists.txt',
string=True)
def setup_run_environment(self, env):
devlibs_prefix = self.spec['rocm-device-libs'].prefix
aomp_prefix = self.spec['aomp'].prefix
        env.set('HIP_DEVICE_LIB_PATH',
                '{0}/amdgcn/bitcode'.format(devlibs_prefix))
        env.set('AOMP', '{0}'.format(aomp_prefix))
def setup_build_environment(self, env):
aomp_prefix = self.spec['aomp'].prefix
        env.set('AOMP', '{0}'.format(aomp_prefix))
        env.set('FC', '{0}/bin/flang'.format(aomp_prefix))
env.set(
'GFXLIST',
'gfx700 gfx701 gfx801 gfx803 gfx900 gfx902 gfx906 gfx908')
def install(self, spec, prefix):
src = self.stage.source_path
gfx_list = "gfx700;gfx701;gfx801;gfx803;gfx900;gfx902;gfx906;gfx908"
aomp_prefix = self.spec['aomp'].prefix
devlibs_prefix = self.spec['rocm-device-libs'].prefix
hsa_prefix = self.spec['hsa-rocr-dev'].prefix
hsakmt_prefix = self.spec['hsakmt-roct'].prefix
comgr_prefix = self.spec['comgr'].prefix
opencl_src = '/aomp-dir/opencl-on-vdi/api/opencl'
omp_src = '/aomp-dir/amd-llvm-project/openmp'
debug_map_format = \
'-fdebug-prefix-map={0}{1}={2}'.format(src, omp_src, aomp_prefix)
if self.spec.version >= Version('3.9.0'):
bitcode_dir = '/amdgcn/bitcode'
else:
bitcode_dir = '/lib'
components = dict()
components['amd-llvm-project'] = [
'../aomp-dir/amd-llvm-project/llvm',
'-DLLVM_ENABLE_PROJECTS=clang;lld;compiler-rt',
'-DCMAKE_BUILD_TYPE=release',
'-DLLVM_ENABLE_ASSERTIONS=ON',
'-DLLVM_TARGETS_TO_BUILD=AMDGPU;X86',
'-DCMAKE_C_COMPILER={0}'.format(self.compiler.cc),
'-DCMAKE_CXX_COMPILER={0}'.format(self.compiler.cxx),
'-DCMAKE_ASM_COMPILER={0}'.format(self.compiler.cc),
'-DBUG_REPORT_URL=https://github.com/ROCm-Developer-Tools/aomp',
'-DLLVM_ENABLE_BINDINGS=OFF',
'-DLLVM_INCLUDE_BENCHMARKS=OFF',
'-DLLVM_BUILD_TESTS=OFF',
'-DLLVM_INCLUDE_TESTS=OFF',
'-DCLANG_INCLUDE_TESTS=OFF',
'-DCMAKE_VERBOSE_MAKEFILE=1',
'-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=FALSE'
]
if self.spec.version == Version('3.5.0'):
components['vdi'] = [
'../aomp-dir/vdi',
'-DUSE_COMGR_LIBRARY=yes',
'-DOPENCL_DIR={0}{1}'.format(src, opencl_src)
]
components['hip-on-vdi'] = [
'../aomp-dir/hip-on-vdi',
'-DVDI_ROOT={0}/aomp-dir/vdi'.format(src),
'-DHIP_COMPILER=clang',
'-DHIP_PLATFORM=vdi',
'-DVDI_DIR={0}/aomp-dir/vdi'.format(src),
'-DHSA_PATH={0}'.format(hsa_prefix),
'-DLIBVDI_STATIC_DIR={0}/spack-build-vdi'.format(src),
'-DCMAKE_CXX_FLAGS=-Wno-ignored-attributes'
]
components['aomp-extras'] = [
'../aomp-dir/aomp-extras',
'-DROCM_PATH=$ROCM_DIR ',
'-DDEVICE_LIBS_DIR={0}{1}'.format(devlibs_prefix, bitcode_dir),
'-DAOMP_STANDALONE_BUILD=0',
'-DDEVICELIBS_ROOT={0}/aomp-dir/rocm-device-libs'.format(src),
'-DCMAKE_VERBOSE_MAKEFILE=1'
]
openmp_common_args = [
'-DROCM_DIR={0}'.format(hsa_prefix),
'-DDEVICE_LIBS_DIR={0}{1}'.format(devlibs_prefix, bitcode_dir),
'-DAOMP_STANDALONE_BUILD=0',
'-DDEVICELIBS_ROOT={0}/aomp-dir/rocm-device-libs'.format(src),
'-DOPENMP_TEST_C_COMPILER={0}/bin/clang'.format(aomp_prefix),
'-DOPENMP_TEST_CXX_COMPILER={0}/bin/clang++'.format(aomp_prefix),
'-DLIBOMPTARGET_AMDGCN_GFXLIST={0}'.format(gfx_list),
'-DLIBOMP_COPY_EXPORTS=OFF',
'-DHSA_INCLUDE={0}'.format(hsa_prefix),
'-DHSA_LIB={0}/lib'.format(hsa_prefix),
'-DHSAKMT_LIB={0}/lib'.format(hsakmt_prefix),
'-DHSAKMT_LIB64={0}/lib64'.format(hsakmt_prefix),
'-DCOMGR_INCLUDE={0}/include'.format(comgr_prefix),
'-DCOMGR_LIB={0}/lib'.format(comgr_prefix),
'-DOPENMP_ENABLE_LIBOMPTARGET=1',
'-DOPENMP_ENABLE_LIBOMPTARGET_HSA=1'
]
components['openmp'] = ['../aomp-dir/amd-llvm-project/openmp']
components['openmp'] += openmp_common_args
components['openmp-debug'] = [
'../aomp-dir/amd-llvm-project/openmp',
'-DLIBOMPTARGET_NVPTX_DEBUG=ON',
'-DOPENMP_ENABLE_LIBOMPTARGET=1',
            '-DOPENMP_ENABLE_LIBOMPTARGET_HSA=1',
'-DCMAKE_CXX_FLAGS=-g',
'-DCMAKE_C_FLAGS=-g'
]
if self.spec.version >= Version('3.9.0'):
components['openmp-debug'] += [
'-DENABLE_SOURCE_COPY=ON',
'-DOPENMP_SOURCE_DEBUG_MAP={0}'.format(debug_map_format)
]
if self.spec.version >= Version('3.8.0'):
components['openmp-debug'] += [
'-DLIBOMP_ARCH=x86_64',
'-DLIBOMP_OMP_VERSION=50',
'-DLIBOMP_OMPT_SUPPORT=ON',
'-DLIBOMP_USE_DEBUGGER=ON',
'-DLIBOMP_CFLAGS=-O0',
'-DLIBOMP_CPPFLAGS=-O0',
'-DLIBOMP_OMPD_ENABLED=ON',
'-DLIBOMP_OMPD_SUPPORT=ON',
'-DLIBOMP_OMPT_DEBUG=ON'
]
components['openmp-debug'] += openmp_common_args
flang_common_args = [
'-DLLVM_ENABLE_ASSERTIONS=ON',
'-DLLVM_CONFIG={0}/bin/llvm-config'.format(aomp_prefix),
'-DCMAKE_CXX_COMPILER={0}/bin/clang++'.format(aomp_prefix),
'-DCMAKE_C_COMPILER={0}/bin/clang'.format(aomp_prefix),
'-DCMAKE_Fortran_COMPILER={0}/bin/flang'.format(aomp_prefix),
'-DLLVM_TARGETS_TO_BUILD=AMDGPU;x86'
]
components['pgmath'] = ['../aomp-dir/flang/runtime/libpgmath']
components['pgmath'] += flang_common_args
components['flang'] = [
'../aomp-dir/flang',
'-DFLANG_OPENMP_GPU_AMD=ON',
'-DFLANG_OPENMP_GPU_NVIDIA=ON'
]
components['flang'] += flang_common_args
components['flang-runtime'] = [
'../aomp-dir/flang',
'-DLLVM_INSTALL_RUNTIME=ON',
'-DFLANG_BUILD_RUNTIME=ON',
'-DOPENMP_BUILD_DIR={0}/spack-build-openmp/runtime/src'.format(src)
]
components['flang-runtime'] += flang_common_args
if self.spec.version != Version('3.5.0'):
build_order = [
"amd-llvm-project", "aomp-extras",
"openmp", "openmp-debug", "pgmath", "flang", "flang-runtime"
]
elif self.spec.version == Version('3.5.0'):
build_order = [
"amd-llvm-project", "vdi", "hip-on-vdi", "aomp-extras",
"openmp", "openmp-debug", "pgmath", "flang", "flang-runtime"
]
# Override standard CMAKE_BUILD_TYPE
for arg in std_cmake_args:
found = re.search("CMAKE_BUILD_TYPE", arg)
if found:
std_cmake_args.remove(arg)
for component in build_order:
with working_dir('spack-build-{0}'.format(component), create=True):
cmake_args = components[component]
cmake_args.extend(std_cmake_args)
                # OpenMP build needs to be run twice (Release, Debug)
if component == "openmp-debug":
cmake_args.append("-DCMAKE_BUILD_TYPE=Debug")
else:
cmake_args.append("-DCMAKE_BUILD_TYPE=Release")
cmake(*cmake_args)
make()
make("install")
|
py | b4109a4074288cc61f8bdb7ac7af37244b383d99 | """Tests GLMRegressor converter."""
import unittest
from distutils.version import StrictVersion
import numpy
from sklearn import linear_model
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import LinearSVR
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import (
FloatTensorType, Int64TensorType, DoubleTensorType
)
from onnxruntime import __version__ as ort_version
from test_utils import dump_data_and_model, fit_regression_model
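# Note (not part of the original tests): every test below follows the same
# fit -> convert -> compare pattern using helpers already imported in this file.
# A minimal sketch of that flow, with an illustrative basename:
#
#     model, X = fit_regression_model(linear_model.LinearRegression())
#     onx = convert_sklearn(
#         model, "linear regression",
#         [("input", FloatTensorType([None, X.shape[1]]))])
#     dump_data_and_model(X, model, onx, basename="SklearnLinearRegression-Dec4")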
class TestGLMRegressorConverter(unittest.TestCase):
def test_model_linear_regression(self):
model, X = fit_regression_model(linear_model.LinearRegression())
model_onnx = convert_sklearn(
model, "linear regression",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLinearRegression-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(
StrictVersion(ort_version) <= StrictVersion("0.4.0"),
reason="old onnxruntime does not support double")
def test_model_linear_regression64(self):
model, X = fit_regression_model(linear_model.LinearRegression())
model_onnx = convert_sklearn(model, "linear regression",
[("input", DoubleTensorType(X.shape))],
dtype=numpy.float64)
self.assertIsNotNone(model_onnx)
self.assertIn("elem_type: 11", str(model_onnx))
def test_model_linear_regression_int(self):
model, X = fit_regression_model(
linear_model.LinearRegression(), is_int=True)
model_onnx = convert_sklearn(
model, "linear regression",
[("input", Int64TensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLinearRegressionInt-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_linear_regression_nointercept(self):
model, X = fit_regression_model(
linear_model.LinearRegression(fit_intercept=False))
model_onnx = convert_sklearn(
model, "linear regression",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLinearRegressionNoIntercept-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_linear_svr(self):
model, X = fit_regression_model(LinearSVR())
model_onnx = convert_sklearn(
model, "linear SVR",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLinearSvr-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_linear_svr_int(self):
model, X = fit_regression_model(LinearSVR(), is_int=True)
model_onnx = convert_sklearn(
model, "linear SVR",
[("input", Int64TensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLinearSvrInt-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_ridge(self):
model, X = fit_regression_model(linear_model.Ridge())
model_onnx = convert_sklearn(
model, "ridge regression",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnRidge-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_ridge_int(self):
model, X = fit_regression_model(linear_model.Ridge(), is_int=True)
model_onnx = convert_sklearn(
model, "ridge regression",
[("input", Int64TensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnRidgeInt-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_sgd_regressor(self):
model, X = fit_regression_model(linear_model.SGDRegressor())
model_onnx = convert_sklearn(
model,
"scikit-learn SGD regression",
[("input", FloatTensorType([None, X.shape[1]]))],
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnSGDRegressor-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_sgd_regressor_int(self):
model, X = fit_regression_model(
linear_model.SGDRegressor(), is_int=True)
model_onnx = convert_sklearn(
model, "SGD regression",
[("input", Int64TensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnSGDRegressorInt-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_elastic_net_regressor(self):
model, X = fit_regression_model(linear_model.ElasticNet())
model_onnx = convert_sklearn(
model,
"scikit-learn elastic-net regression",
[("input", FloatTensorType([None, X.shape[1]]))],
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnElasticNet-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_elastic_net_cv_regressor(self):
model, X = fit_regression_model(linear_model.ElasticNetCV())
model_onnx = convert_sklearn(
model,
"scikit-learn elastic-net regression",
[("input", FloatTensorType([None, X.shape[1]]))],
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnElasticNetCV-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_elastic_net_regressor_int(self):
model, X = fit_regression_model(linear_model.ElasticNet(), is_int=True)
model_onnx = convert_sklearn(
model, "elastic net regression",
[("input", Int64TensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnElasticNetRegressorInt-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_lars(self):
model, X = fit_regression_model(linear_model.Lars())
model_onnx = convert_sklearn(
model, "lars",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLars-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_lars_cv(self):
model, X = fit_regression_model(linear_model.LarsCV())
model_onnx = convert_sklearn(
model, "lars",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLarsCV-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_lasso_lars(self):
model, X = fit_regression_model(linear_model.LassoLars(alpha=0.01))
model_onnx = convert_sklearn(
model, "lasso lars",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLassoLars-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_lasso_lars_cv(self):
model, X = fit_regression_model(linear_model.LassoLarsCV())
model_onnx = convert_sklearn(
model, "lasso lars cv",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLassoLarsCV-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_lasso_lars_ic(self):
model, X = fit_regression_model(linear_model.LassoLarsIC())
model_onnx = convert_sklearn(
model, "lasso lars cv",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLassoLarsIC-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_lasso_cv(self):
model, X = fit_regression_model(linear_model.LassoCV())
model_onnx = convert_sklearn(
model, "lasso cv",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLassoCV-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_lasso_lars_int(self):
model, X = fit_regression_model(linear_model.LassoLars(), is_int=True)
model_onnx = convert_sklearn(
model, "lasso lars",
[("input", Int64TensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnLassoLarsInt-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_multi_linear_regression(self):
model, X = fit_regression_model(linear_model.LinearRegression(),
n_targets=2)
model_onnx = convert_sklearn(
model, "linear regression",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnLinearRegression-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_ard_regression(self):
model, X = fit_regression_model(linear_model.ARDRegression())
model_onnx = convert_sklearn(
model, "ard regression",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnARDRegression-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_theilsen(self):
model, X = fit_regression_model(linear_model.TheilSenRegressor())
model_onnx = convert_sklearn(
model, "thiel-sen regressor",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnTheilSen-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_bayesian_ridge(self):
model, X = fit_regression_model(linear_model.BayesianRidge())
model_onnx = convert_sklearn(
model, "bayesian ridge",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnBayesianRidge-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_huber_regressor(self):
model, X = fit_regression_model(linear_model.HuberRegressor())
model_onnx = convert_sklearn(
model, "huber regressor",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnHuberRegressor-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_multi_task_lasso(self):
model, X = fit_regression_model(linear_model.MultiTaskLasso(),
n_targets=2)
model_onnx = convert_sklearn(
model, "multi-task lasso",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnMultiTaskLasso-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_multi_task_lasso_cv(self):
model, X = fit_regression_model(linear_model.MultiTaskLassoCV(),
n_targets=2)
model_onnx = convert_sklearn(
model, "mutli-task lasso cv",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnMultiTaskLassoCV-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_multi_task_elasticnet(self):
model, X = fit_regression_model(linear_model.MultiTaskElasticNet(),
n_targets=2)
model_onnx = convert_sklearn(
model, "multi-task elasticnet",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnMultiTaskElasticNet-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_orthogonal_matching_pursuit(self):
model, X = fit_regression_model(
linear_model.OrthogonalMatchingPursuit())
model_onnx = convert_sklearn(
model, "orthogonal matching pursuit",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnOrthogonalMatchingPursuit-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_passive_aggressive_regressor(self):
model, X = fit_regression_model(
linear_model.PassiveAggressiveRegressor())
model_onnx = convert_sklearn(
model, "passive aggressive regressor",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnPassiveAggressiveRegressor-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_ransac_regressor_default(self):
model, X = fit_regression_model(
linear_model.RANSACRegressor())
model_onnx = convert_sklearn(
model, "ransac regressor",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnRANSACRegressor-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_ransac_regressor_mlp(self):
model, X = fit_regression_model(
linear_model.RANSACRegressor(
base_estimator=MLPRegressor(solver='lbfgs')))
model_onnx = convert_sklearn(
model, "ransac regressor",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnRANSACRegressorMLP-Dec3",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_ransac_regressor_tree(self):
model, X = fit_regression_model(
linear_model.RANSACRegressor(
base_estimator=GradientBoostingRegressor()))
model_onnx = convert_sklearn(
model, "ransac regressor",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnRANSACRegressorTree-Dec3",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_multi_task_elasticnet_cv(self):
model, X = fit_regression_model(linear_model.MultiTaskElasticNetCV(),
n_targets=2)
model_onnx = convert_sklearn(
model, "multi-task elasticnet cv",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnMultiTaskElasticNetCV-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_model_orthogonal_matching_pursuit_cv(self):
model, X = fit_regression_model(
linear_model.OrthogonalMatchingPursuitCV())
model_onnx = convert_sklearn(
model, "orthogonal matching pursuit cv",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
verbose=False,
basename="SklearnOrthogonalMatchingPursuitCV-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
if __name__ == "__main__":
unittest.main()
|
py | b4109a84109325068cc1b47a3a35e5fedd345328 | import re
import inspect
import os
import six
IDENTIFIER = re.compile(r'[a-z_](\w)*$', re.IGNORECASE)
DEFAULT = {
# Server Specific Configurations
'server': {
'port': '8080',
'host': '0.0.0.0'
},
# Pecan Application Configurations
'app': {
'root': None,
'modules': [],
'static_root': 'public',
'template_path': '',
'force_canonical': True
}
}
class ConfigDict(dict):
pass
class Config(object):
'''
Base class for Pecan configurations.
Create a Pecan configuration object from a dictionary or a
filename.
:param conf_dict: A python dictionary to use for the configuration.
:param filename: A filename to use for the configuration.
'''
def __init__(self, conf_dict={}, filename=''):
self.__values__ = {}
self.__file__ = filename
self.update(conf_dict)
def empty(self):
self.__values__ = {}
def update(self, conf_dict):
'''
Updates this configuration with a dictionary.
:param conf_dict: A python dictionary to update this configuration
with.
'''
if isinstance(conf_dict, dict):
iterator = six.iteritems(conf_dict)
else:
iterator = iter(conf_dict)
for k, v in iterator:
if not IDENTIFIER.match(k):
                raise ValueError('\'%s\' is not a valid identifier' % k)
cur_val = self.__values__.get(k)
if isinstance(cur_val, Config):
cur_val.update(conf_dict[k])
else:
self[k] = conf_dict[k]
def get(self, attribute, default=None):
try:
return self[attribute]
except KeyError:
return default
def __dictify__(self, obj, prefix):
'''
Private helper method for to_dict.
'''
for k, v in obj.copy().items():
if prefix:
del obj[k]
k = "%s%s" % (prefix, k)
if isinstance(v, Config):
v = self.__dictify__(dict(v), prefix)
obj[k] = v
return obj
def to_dict(self, prefix=None):
'''
        Recursively converts the Config object into a valid dictionary.
        :param prefix: A string to optionally prefix all key elements in the
                       returned dictionary.
'''
conf_obj = dict(self)
return self.__dictify__(conf_obj, prefix)
def __getattr__(self, name):
try:
return self.__values__[name]
except KeyError:
msg = "'pecan.conf' object has no attribute '%s'" % name
raise AttributeError(msg)
def __getitem__(self, key):
return self.__values__[key]
def __setitem__(self, key, value):
if isinstance(value, dict) and not isinstance(value, ConfigDict):
if value.get('__force_dict__'):
del value['__force_dict__']
self.__values__[key] = ConfigDict(value)
else:
self.__values__[key] = Config(value, filename=self.__file__)
elif isinstance(value, six.string_types) and '%(confdir)s' in value:
confdir = os.path.dirname(self.__file__) or os.getcwd()
self.__values__[key] = value.replace('%(confdir)s', confdir)
else:
self.__values__[key] = value
def __iter__(self):
return six.iteritems(self.__values__)
def __dir__(self):
"""
        When using dir(), returns a list of the keys in the config. Note:
        this only works in Python 2.6 or later.
"""
return list(self.__values__.keys())
def __repr__(self):
return 'Config(%s)' % str(self.__values__)
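# Hypothetical usage sketch (not part of the original module); the keys below
# are made up for illustration:
#
#     conf = Config({'server': {'port': '8080'}, 'debug': True})
#     conf.server.port       # -> '8080'  (nested dicts become Config objects)
#     conf['debug']          # -> True
#     conf.to_dict('app_')   # -> {'app_server': {'app_port': '8080'}, 'app_debug': True}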
def conf_from_file(filepath):
'''
    Creates a configuration object from a file.
:param filepath: The path to the file.
'''
abspath = os.path.abspath(os.path.expanduser(filepath))
conf_dict = {}
if not os.path.isfile(abspath):
raise RuntimeError('`%s` is not a file.' % abspath)
with open(abspath, 'rb') as f:
exec(compile(f.read(), abspath, 'exec'), globals(), conf_dict)
conf_dict['__file__'] = abspath
return conf_from_dict(conf_dict)
def get_conf_path_from_env():
'''
If the ``PECAN_CONFIG`` environment variable exists and it points to
a valid path it will return that, otherwise it will raise
a ``RuntimeError``.
'''
config_path = os.environ.get('PECAN_CONFIG')
if not config_path:
error = "PECAN_CONFIG is not set and " \
"no config file was passed as an argument."
elif not os.path.isfile(config_path):
error = "PECAN_CONFIG was set to an invalid path: %s" % config_path
else:
return config_path
raise RuntimeError(error)
def conf_from_dict(conf_dict):
'''
    Creates a configuration object from a dictionary.
:param conf_dict: The configuration dictionary.
'''
conf = Config(filename=conf_dict.get('__file__', ''))
for k, v in six.iteritems(conf_dict):
if k.startswith('__'):
continue
elif inspect.ismodule(v):
continue
conf[k] = v
return conf
def initconf():
'''
Initializes the default configuration and exposes it at
``pecan.configuration.conf``, which is also exposed at ``pecan.conf``.
'''
return conf_from_dict(DEFAULT)
def set_config(config, overwrite=False):
'''
Updates the global configuration.
:param config: Can be a dictionary containing configuration, or a string
which represents a (relative) configuration filename.
'''
if config is None:
config = get_conf_path_from_env()
    # must come after the fallback above, otherwise a bad fallback would incorrectly clear the config
if overwrite is True:
_runtime_conf.empty()
if isinstance(config, six.string_types):
config = conf_from_file(config)
_runtime_conf.update(config)
if config.__file__:
_runtime_conf.__file__ = config.__file__
elif isinstance(config, dict):
_runtime_conf.update(conf_from_dict(config))
else:
        raise TypeError('%s is neither a dictionary nor a string.' % config)
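# Hypothetical usage sketch (not part of the original module); the file path
# and port below are made up for illustration:
#
#     set_config('/path/to/app_config.py')          # load from a config file
#     set_config({'server': {'port': '9090'}})      # or update from a dictionary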
_runtime_conf = initconf()
|
py | b4109a99eedfb1a38d394b7decc0ea9f81853108 | """
LGB ts log_loss SKFold 10 symmetry_large_std_quantile_set
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
from timeit import default_timer as timer
from sklearn.model_selection import StratifiedKFold
import src.common as common
import src.config.constants as constants
import src.modeling.train_util as model
import src.munging.process_data_util as process_data
import src.common.com_util as util
if __name__ == "__main__":
common.set_timezone()
start = timer()
# Create RUN_ID
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
SEED = 42
EXP_DETAILS = "LGB ts log_loss SKFold 10 symmetry_large_std_quantile_set"
IS_TEST = False
PLOT_FEATURE_IMPORTANCE = False
N_SPLITS = 10
TARGET = "loss"
MODEL_TYPE = "lgb"
OBJECTIVE = "multiclass"
NUM_CLASSES = 43
METRIC = "multi_logloss"
BOOSTING_TYPE = "gbdt"
VERBOSE = 100
N_THREADS = 8
NUM_LEAVES = 31
MAX_DEPTH = -1
N_ESTIMATORS = 1000
LEARNING_RATE = 0.1
EARLY_STOPPING_ROUNDS = 100
lgb_params = {
"objective": OBJECTIVE,
"boosting_type": BOOSTING_TYPE,
"learning_rate": LEARNING_RATE,
"num_class": NUM_CLASSES,
"num_leaves": NUM_LEAVES,
"tree_learner": "serial",
"n_jobs": N_THREADS,
"seed": SEED,
"max_depth": MAX_DEPTH,
"max_bin": 255,
"metric": METRIC,
"verbose": -1,
}
LOGGER_NAME = "sub_1"
logger = common.get_logger(LOGGER_NAME, MODEL_NAME, RUN_ID, constants.LOG_DIR)
common.set_seed(SEED)
logger.info(f"Running for Model Number [{MODEL_NAME}] & [{RUN_ID}]")
common.update_tracking(
RUN_ID, "model_number", MODEL_NAME, drop_incomplete_rows=True
)
common.update_tracking(RUN_ID, "model_type", MODEL_TYPE)
common.update_tracking(RUN_ID, "is_test", IS_TEST)
common.update_tracking(RUN_ID, "n_estimators", N_ESTIMATORS)
common.update_tracking(RUN_ID, "learning_rate", LEARNING_RATE)
common.update_tracking(RUN_ID, "num_leaves", NUM_LEAVES)
common.update_tracking(RUN_ID, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger,
constants.PROCESSED_DATA_DIR,
train=True,
test=True,
sample_submission=True,
)
features_df = pd.read_parquet(
f"{constants.FEATURES_DATA_DIR}/cast/symmetry_large_std_quantile_set_cast.parquet"
)
logger.info(f"Shape of the features {features_df.shape}")
features_to_drop = [
"loan__symmetry_looking__r_0.0",
"loan__symmetry_looking__r_0.1",
"loan__symmetry_looking__r_0.15000000000000002",
"loan__symmetry_looking__r_0.2",
"loan__symmetry_looking__r_0.25",
"loan__symmetry_looking__r_0.30000000000000004",
"loan__symmetry_looking__r_0.35000000000000003",
"loan__symmetry_looking__r_0.4",
"loan__symmetry_looking__r_0.45",
"loan__symmetry_looking__r_0.5",
"loan__symmetry_looking__r_0.55",
"loan__symmetry_looking__r_0.6000000000000001",
"loan__symmetry_looking__r_0.65",
"loan__symmetry_looking__r_0.7000000000000001",
"loan__symmetry_looking__r_0.75",
"loan__symmetry_looking__r_0.8",
"loan__symmetry_looking__r_0.8500000000000001",
"loan__symmetry_looking__r_0.9",
"loan__symmetry_looking__r_0.9500000000000001",
"loan__large_standard_deviation__r_0.05",
"loan__large_standard_deviation__r_0.1",
"loan__large_standard_deviation__r_0.15000000000000002",
"loan__large_standard_deviation__r_0.30000000000000004",
"loan__large_standard_deviation__r_0.35000000000000003",
"loan__large_standard_deviation__r_0.4",
"loan__large_standard_deviation__r_0.45",
"loan__large_standard_deviation__r_0.5",
"loan__large_standard_deviation__r_0.55",
"loan__large_standard_deviation__r_0.6000000000000001",
"loan__large_standard_deviation__r_0.65",
"loan__large_standard_deviation__r_0.7000000000000001",
"loan__large_standard_deviation__r_0.75",
"loan__large_standard_deviation__r_0.8",
"loan__large_standard_deviation__r_0.8500000000000001",
"loan__large_standard_deviation__r_0.9",
"loan__large_standard_deviation__r_0.9500000000000001",
]
features_df = features_df.drop(features_to_drop, axis=1)
logger.info(f"Shape of the features after dropping {features_df.shape}")
train_X = features_df.iloc[0: len(train_df)]
train_Y = train_df["loss"]
test_X = features_df.iloc[len(train_df):]
logger.info("Adding additional rows for loss=42")
train_X_rare = train_X.loc[[96131, 131570, 212724]]
train_X = train_X.append(
[train_X_rare, train_X_rare, train_X_rare], ignore_index=True
)
train_Y_rare = train_Y.loc[[96131, 131570, 212724]]
train_Y = train_Y.append(
[train_Y_rare, train_Y_rare, train_Y_rare], ignore_index=True
)
logger.info(
f"Shape of train_X : {train_X.shape}, test_X: {test_X.shape}, train_Y: {train_Y.shape}"
)
predictors = list(train_X.columns)
sk = StratifiedKFold(n_splits=N_SPLITS, shuffle=True)
common.update_tracking(RUN_ID, "no_of_features", len(predictors), is_integer=True)
common.update_tracking(RUN_ID, "cv_method", "StratifiedKFold")
common.update_tracking(RUN_ID, "n_splits", N_SPLITS, is_integer=True)
results_dict = model.lgb_train_validate_on_cv(
logger=logger,
run_id=RUN_ID,
train_X=train_X,
train_Y=train_Y,
test_X=test_X,
metric="log_loss",
num_class=NUM_CLASSES,
kf=sk,
features=predictors,
params=lgb_params,
n_estimators=N_ESTIMATORS,
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
cat_features="auto",
is_test=False,
verbose_eval=100,
)
train_index = train_X.index
    # Since we are using multiclass classification with log loss as the metric,
    # the prediction and y_oof arrays consist of probabilities for the 43 classes.
    # Convert those to a label representing the predicted class.
results_dict_copy = results_dict.copy()
results_dict_copy["prediction"] = np.argmax(results_dict["prediction"], axis=1)
results_dict_copy["y_oof"] = np.argmax(results_dict["y_oof"], axis=1)
rmse_score = model._calculate_perf_metric(
"rmse", train_Y.values, results_dict_copy["y_oof"]
)
logger.info(f"RMSE score {rmse_score}")
util.update_tracking(run_id=RUN_ID, key="RMSE", value=rmse_score, is_integer=False)
common.save_artifacts(
logger,
is_test=False,
is_plot_fi=True,
result_dict=results_dict_copy,
submission_df=sample_submission_df,
train_index=train_index,
model_number=MODEL_NAME,
run_id=RUN_ID,
sub_dir=constants.SUBMISSION_DIR,
oof_dir=constants.OOF_DIR,
fi_dir=constants.FI_DIR,
fi_fig_dir=constants.FI_FIG_DIR,
)
end = timer()
common.update_tracking(RUN_ID, "training_time", end - start, is_integer=True)
common.update_tracking(RUN_ID, "comments", EXP_DETAILS)
logger.info("Execution Complete")
|
py | b4109ae8caf454aadc2fe08bf3d9c05990197e75 | import unittest
from SeleniumTestScripts.TC_Renter_SuccessfulLogin import TC_Renter_SuccessfulLogin
from SeleniumTestScripts.TC_Renter_UnsuccessfulLogin import TC_Renter_UnsuccessfulLogin
from SeleniumTestScripts.TC_Renter_Sign_up import TC_Renter_Sign_up
TC_Renter_Sign_up = unittest.TestLoader().loadTestsFromTestCase(TC_Renter_Sign_up)
TC_Renter_SuccessfulLogin = unittest.TestLoader().loadTestsFromTestCase(TC_Renter_SuccessfulLogin)
TC_Renter_UnsuccessfulLogin = unittest.TestLoader().loadTestsFromTestCase(TC_Renter_UnsuccessfulLogin)
test_suite = unittest.TestSuite([TC_Renter_SuccessfulLogin, TC_Renter_UnsuccessfulLogin, TC_Renter_Sign_up])
unittest.TextTestRunner().run(test_suite)
|
py | b4109b005f2d47973b8b2f0acf5f8840ffb37fc8 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['EventHubArgs', 'EventHub']
@pulumi.input_type
class EventHubArgs:
def __init__(__self__, *,
namespace_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
event_hub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
message_retention_in_days: Optional[pulumi.Input[float]] = None,
name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[float]] = None,
status: Optional[pulumi.Input['EntityStatus']] = None,
type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a EventHub resource.
:param pulumi.Input[str] namespace_name: The Namespace name
:param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
:param pulumi.Input[str] event_hub_name: The Event Hub name
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[float] message_retention_in_days: Number of days to retain the events for this Event Hub.
:param pulumi.Input[str] name: Name of the Event Hub.
:param pulumi.Input[float] partition_count: Number of partitions created for the Event Hub.
:param pulumi.Input['EntityStatus'] status: Enumerates the possible values for the status of the Event Hub.
:param pulumi.Input[str] type: ARM type of the Namespace.
"""
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if event_hub_name is not None:
pulumi.set(__self__, "event_hub_name", event_hub_name)
if location is not None:
pulumi.set(__self__, "location", location)
if message_retention_in_days is not None:
pulumi.set(__self__, "message_retention_in_days", message_retention_in_days)
if name is not None:
pulumi.set(__self__, "name", name)
if partition_count is not None:
pulumi.set(__self__, "partition_count", partition_count)
if status is not None:
pulumi.set(__self__, "status", status)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
The Namespace name
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group within the azure subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="eventHubName")
def event_hub_name(self) -> Optional[pulumi.Input[str]]:
"""
The Event Hub name
"""
return pulumi.get(self, "event_hub_name")
@event_hub_name.setter
def event_hub_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_hub_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="messageRetentionInDays")
def message_retention_in_days(self) -> Optional[pulumi.Input[float]]:
"""
Number of days to retain the events for this Event Hub.
"""
return pulumi.get(self, "message_retention_in_days")
@message_retention_in_days.setter
def message_retention_in_days(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "message_retention_in_days", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Event Hub.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> Optional[pulumi.Input[float]]:
"""
Number of partitions created for the Event Hub.
"""
return pulumi.get(self, "partition_count")
@partition_count.setter
def partition_count(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "partition_count", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['EntityStatus']]:
"""
Enumerates the possible values for the status of the Event Hub.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['EntityStatus']]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
ARM type of the Namespace.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
class EventHub(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
event_hub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
message_retention_in_days: Optional[pulumi.Input[float]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[float]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input['EntityStatus']] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Single item in List or Get Event Hub operation
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] event_hub_name: The Event Hub name
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[float] message_retention_in_days: Number of days to retain the events for this Event Hub.
:param pulumi.Input[str] name: Name of the Event Hub.
:param pulumi.Input[str] namespace_name: The Namespace name
:param pulumi.Input[float] partition_count: Number of partitions created for the Event Hub.
:param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
:param pulumi.Input['EntityStatus'] status: Enumerates the possible values for the status of the Event Hub.
:param pulumi.Input[str] type: ARM type of the Namespace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EventHubArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Single item in List or Get Event Hub operation
:param str resource_name: The name of the resource.
:param EventHubArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EventHubArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
event_hub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
message_retention_in_days: Optional[pulumi.Input[float]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[float]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input['EntityStatus']] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EventHubArgs.__new__(EventHubArgs)
__props__.__dict__["event_hub_name"] = event_hub_name
__props__.__dict__["location"] = location
__props__.__dict__["message_retention_in_days"] = message_retention_in_days
__props__.__dict__["name"] = name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
__props__.__dict__["partition_count"] = partition_count
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["status"] = status
__props__.__dict__["type"] = type
__props__.__dict__["created_at"] = None
__props__.__dict__["partition_ids"] = None
__props__.__dict__["updated_at"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventhub/v20150801:EventHub"), pulumi.Alias(type_="azure-native:eventhub:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub:EventHub"), pulumi.Alias(type_="azure-native:eventhub/v20140901:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20140901:EventHub"), pulumi.Alias(type_="azure-native:eventhub/v20170401:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20170401:EventHub"), pulumi.Alias(type_="azure-native:eventhub/v20180101preview:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20180101preview:EventHub"), pulumi.Alias(type_="azure-native:eventhub/v20210101preview:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20210101preview:EventHub"), pulumi.Alias(type_="azure-native:eventhub/v20210601preview:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20210601preview:EventHub")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(EventHub, __self__).__init__(
'azure-native:eventhub/v20150801:EventHub',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'EventHub':
"""
Get an existing EventHub resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = EventHubArgs.__new__(EventHubArgs)
__props__.__dict__["created_at"] = None
__props__.__dict__["location"] = None
__props__.__dict__["message_retention_in_days"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partition_count"] = None
__props__.__dict__["partition_ids"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
__props__.__dict__["updated_at"] = None
return EventHub(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Exact time the Event Hub was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="messageRetentionInDays")
def message_retention_in_days(self) -> pulumi.Output[Optional[float]]:
"""
Number of days to retain the events for this Event Hub.
"""
return pulumi.get(self, "message_retention_in_days")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> pulumi.Output[Optional[float]]:
"""
Number of partitions created for the Event Hub.
"""
return pulumi.get(self, "partition_count")
@property
@pulumi.getter(name="partitionIds")
def partition_ids(self) -> pulumi.Output[Sequence[str]]:
"""
Current number of shards on the Event Hub.
"""
return pulumi.get(self, "partition_ids")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
Enumerates the possible values for the status of the Event Hub.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
The exact time the message was updated.
"""
return pulumi.get(self, "updated_at")
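# Hypothetical usage sketch (not part of the generated SDK): creating an Event
# Hub with this API version.  The import path mirrors this module's folder
# layout and the resource/group/namespace names are made up:
#
#     import pulumi_azure_native as azure_native
#
#     hub = azure_native.eventhub.v20150801.EventHub(
#         "example-hub",
#         resource_group_name="example-rg",
#         namespace_name="example-ns",
#         message_retention_in_days=1,
#         partition_count=2)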
|
py | b4109cd43b48b7c6f19e5c51a82e556c7896bef4 | from typing import List
from PySide6.QtWidgets import QWidget, QGroupBox, QLabel, QVBoxLayout, QGridLayout
from common.Config import Config
from common.LogHolder import LogHolder
class HelpSettingsWidget(QWidget, LogHolder):
def __init__(self, config: Config) -> None:
QWidget.__init__(self)
LogHolder.__init__(self)
self.config = config
self.label_row_count = 0
group = QGroupBox("Help")
grid_layout = QGridLayout()
group.setLayout(grid_layout)
self.add_label_row(grid_layout, ["Left Click", "Move index finger near thumb."])
self.add_label_row(grid_layout, ["Right Click", "Move middle and ring finger near thumb."])
self.add_label_row(grid_layout, ["Middle Click", "Move ring finger and pinky near thumb."])
self.add_label_row(grid_layout, ["Scroll Up/Down", "Make thumb up/down gesture."])
self.add_label_row(grid_layout, ["Drag", "Hold left click gesture."])
# Layout
main_layout = QVBoxLayout()
self.setLayout(main_layout)
main_layout.addWidget(group)
def add_label_row(self, grid_layout: QGridLayout, texts: List[str]) -> None:
row_span = 1
column = 0
for text in texts:
column_span = 1 if column == 0 else 3
label = QLabel(text)
if column == 0:
label.setProperty("cssClass", "title_column") # type: ignore
grid_layout.addWidget(label, self.label_row_count, column, row_span, column_span)
column = column + 1
self.label_row_count = self.label_row_count + 1 |
py | b4109d1ba40f01f03f9797a23354f0a0d0954d41 | """locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import RedirectView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('catalog/', include('catalog.urls')),
path('', RedirectView.as_view(url='catalog/')),
path('accounts/', include('django.contrib.auth.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
py | b4109f555670f800f2573fe8e7461d1255f4d892 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-06-21 05:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0048_servicetype_image'),
]
operations = [
migrations.AlterField(
model_name='servicetype',
name='image',
field=models.FilePathField(match='*.jpg', path='/static/images/samples/390x260', recursive=True),
),
]
|
py | b4109ff612d0f249a3dc1f369512f37d7823a132 | from simp_py import tft,lcd
from random import seed, uniform
import time
from machine import Pin
from button import Button
seed(int(time.time()))
class FOOD:
global lcd
def __init__(self):
self.pos=[10,20]
def new(self,trunk):
x = round(uniform(1,30))
y = round(uniform(1,20))
while True:
if [x,y] in trunk:
x = round(uniform(1,30))
y = round(uniform(1,20))
else:
break
self.pos=[x,y]
lcd.text(x*10,y*10,'*')
def is_catch(self,x,y):
if self.pos==[x,y]:
return True
class SNAKE:
global lcd,tft,uniform,food
def __init__(self):
self.trunk=[[4,10],[4,9],[4,8]]
self.dirx=0
self.diry=1
food.new(self.trunk)
self.draw()
def draw(self):
for x,y in self.trunk:
lcd.text(x*10,y*10,'#')
def go_dir(self,x,y):
x+= self.dirx
if x*10 > 320:
x=0
if x<0:
x=32
y+= self.diry
if y*10 > 240:
y=0
if y<0:
y=24
return x,y
def go_left(self):
self.dirx=-1
self.diry=0
def go_right(self):
self.dirx=1
self.diry=0
def go_down(self):
self.diry=1
self.dirx=0
def go_up(self):
self.diry=-1
self.dirx=0
def go(self):
xt,yt = self.trunk.pop()
lcd.textClear(xt*10,yt*10,'#')
x,y = self.trunk[0]
x,y = self.go_dir(x,y)
self.trunk.insert(0,[x,y])
if food.is_catch(x,y):
self.trunk.append([xt,yt])
food.new(self.trunk)
self.draw()
if __name__=='__main__':
lcd.clear()
food = FOOD()
snake=SNAKE()
def Apressed(v):
global snake,tft
print('Apressed')
snake.go_left()
def Bpressed(v):
global snake,tft
print('Bpressed')
tft.on()
if snake.diry==0:
snake.go_up()
else:
if snake.diry==1:
snake.go_up()
else:
snake.go_down()
def Cpressed(v):
global snake,btnA
print('Cpressed')
if btnA.isPressed():
tft.off()
snake.go_right()
btnA = Button(39,Apressed,trigger=Pin.IRQ_FALLING)
btnB = Button(38,Bpressed, trigger=Pin.IRQ_FALLING)
btnC = Button(37,Cpressed, trigger=Pin.IRQ_FALLING)
while True:
time.sleep(0.2)
snake.go()
|
py | b410a1c2a7ffaab5e3100a670f9684254bc4b69c | from unittest import TestCase
from .uoi_strategy import UoiStrategy
from ts import (
LanguageLibrary,
Parser,
CSyntax,
)
class TestUoiStrategy(TestCase):
def setUp(self) -> None:
LanguageLibrary.build()
self._language = LanguageLibrary.c()
self._parser = Parser.create_with_language(self._language)
self._syntax = CSyntax()
def test_capture_update_expression(self) -> None:
program = "--a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
candidates = stategy.capture(tree.root)
self.assertEqual(len(candidates), 1)
def test_mutations_update_expression(self) -> None:
program = "--a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
mutations = stategy.mutations(
self._parser, tree, tree.root
)
self.assertEqual(len(mutations), 1)
def test_mutate_update_expression(self) -> None:
program = "--a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
mutation = stategy.mutate(
tree, tree.root
)
self.assertEqual(mutation.text, "++a;")
def test_capture_arithmetic_unary_expression(self) -> None:
program = "-a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
candidates = stategy.capture(tree.root)
self.assertEqual(len(candidates), 1)
def test_mutations_arithmetic_unary_expression(self) -> None:
program = "-a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
mutations = stategy.mutations(
self._parser, tree, tree.root
)
self.assertEqual(len(mutations), 1)
def test_mutate_arithmetic_unary_expression(self) -> None:
program = "-a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
mutation = stategy.mutate(
tree, tree.root
)
self.assertEqual(mutation.text, "+a;")
def test_capture_logical_unary_expression(self) -> None:
program = "!a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
candidates = stategy.capture(tree.root)
self.assertEqual(len(candidates), 1)
def test_mutations_logical_unary_expression(self) -> None:
program = "!a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
mutations = stategy.mutations(
self._parser, tree, tree.root
)
self.assertEqual(len(mutations), 1)
def test_mutate_logical_unary_expression(self) -> None:
program = "!a;"
tree = self._parser.parse(program)
stategy = UoiStrategy(self._parser)
mutation = stategy.mutate(
tree, tree.root
)
self.assertEqual(mutation.text, "a;") |
py | b410a294a511b465d267074ea9005a605e29f4fb | import os
import sys
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../')
from common import utils
import models
from common.log import log, Log, LogLevel
from common.state import State
from common import cuda
from common import paths
import common.torch
import common.numpy
import math
import torch
import numpy
import argparse
class TestPerturbations:
"""
Test a trained classifier.
"""
def __init__(self, args=None):
"""
Initialize.
:param args: optional arguments if not to use sys.argv
:type args: [str]
"""
self.args = None
""" Arguments of program. """
parser = self.get_parser()
if args is not None:
self.args = parser.parse_args(args)
else:
self.args = parser.parse_args()
self.test_codes = None
""" (numpy.ndarray) Codes for testing. """
self.perturbation_codes = None
""" (numpy.ndarray) Perturbation codes for testing. """
self.model = None
""" (encoder.Encoder) Model to train. """
self.perturbations = None
""" (numpy.ndarray) Perturbations per test image. """
self.original_accuracy = None
""" (numpy.ndarray) Success of classifier. """
self.transfer_accuracy = None
""" (numpy.ndarray) Success of classifier. """
self.original_success = None
""" (numpy.ndarray) Success per test image. """
self.transfer_success = None
""" (numpy.ndarray) Success per test image. """
if self.args.log_file:
utils.makedir(os.path.dirname(self.args.log_file))
Log.get_instance().attach(open(self.args.log_file, 'w'))
log('-- ' + self.__class__.__name__)
for key in vars(self.args):
log('[Testing] %s=%s' % (key, str(getattr(self.args, key))))
def __del__(self):
"""
Remove log file.
"""
if self.args is not None:
if self.args.log_file:
Log.get_instance().detach(self.args.log_file)
def get_parser(self):
"""
Get parser.
:return: parser
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description='Attack classifier.')
parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing images.', type=str)
parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
parser.add_argument('-label_index', default=2, help='Label index.', type=int)
parser.add_argument('-classifier_file', default=paths.state_file('classifier'), help='Snapshot state file of classifier.', type=str)
parser.add_argument('-perturbations_file', default=paths.results_file('classifier/perturbations'), help='HDF5 file containing perturbations.', type=str)
parser.add_argument('-original_success_file', default=paths.results_file('classifier/success'), help='HDF5 file containing success.', type=str)
parser.add_argument('-transfer_success_file', default=paths.results_file('classifier/transfer_success'), help='HDF5 file containing transfer success.', type=str)
parser.add_argument('-original_accuracy_file', default=paths.results_file('classifier/accuracy'), help='HDF5 file containing accuracy.', type=str)
parser.add_argument('-transfer_accuracy_file', default=paths.results_file('classifier/transfer_accuracy'), help='HDF5 file containing transfer accuracy.', type=str)
parser.add_argument('-log_file', default=paths.log_file('classifier/attacks'), help='Log file.', type=str)
parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
# Some network parameters.
parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')
return parser
def test(self):
"""
Test classifier to identify valid samples to attack.
"""
self.model.eval()
assert self.model.training is False
assert self.perturbation_codes.shape[0] == self.perturbations.shape[0]
assert self.test_codes.shape[0] == self.test_images.shape[0]
assert len(self.perturbations.shape) == 4
assert len(self.test_images.shape) == 4
perturbations_accuracy = None
num_batches = int(math.ceil(self.perturbations.shape[0] / self.args.batch_size))
for b in range(num_batches):
b_start = b * self.args.batch_size
b_end = min((b + 1) * self.args.batch_size, self.perturbations.shape[0])
batch_perturbations = common.torch.as_variable(self.perturbations[b_start: b_end], self.args.use_gpu)
batch_classes = common.torch.as_variable(self.perturbation_codes[b_start: b_end], self.args.use_gpu)
batch_perturbations = batch_perturbations.permute(0, 3, 1, 2)
output_classes = self.model(batch_perturbations)
values, indices = torch.max(torch.nn.functional.softmax(output_classes, dim=1), dim=1)
errors = torch.abs(indices - batch_classes)
perturbations_accuracy = common.numpy.concatenate(perturbations_accuracy, errors.data.cpu().numpy())
for n in range(batch_perturbations.size(0)):
log('[Testing] %d: original success=%d, transfer accuracy=%d' % (n, self.original_success[b_start + n], errors[n].item()))
self.transfer_success[perturbations_accuracy == 0] = -1
self.transfer_success = self.transfer_success.reshape((self.N_samples, self.N_attempts))
self.transfer_success = numpy.swapaxes(self.transfer_success, 0, 1)
utils.makedir(os.path.dirname(self.args.transfer_success_file))
utils.write_hdf5(self.args.transfer_success_file, self.transfer_success)
log('[Testing] wrote %s' % self.args.transfer_success_file)
num_batches = int(math.ceil(self.test_images.shape[0] / self.args.batch_size))
for b in range(num_batches):
b_start = b * self.args.batch_size
b_end = min((b + 1) * self.args.batch_size, self.test_images.shape[0])
batch_images = common.torch.as_variable(self.test_images[b_start: b_end], self.args.use_gpu)
batch_classes = common.torch.as_variable(self.test_codes[b_start: b_end], self.args.use_gpu)
batch_images = batch_images.permute(0, 3, 1, 2)
output_classes = self.model(batch_images)
values, indices = torch.max(torch.nn.functional.softmax(output_classes, dim=1), dim=1)
errors = torch.abs(indices - batch_classes)
self.transfer_accuracy = common.numpy.concatenate(self.transfer_accuracy, errors.data.cpu().numpy())
if b % 100 == 0:
log('[Testing] computing accuracy %d' % b)
self.transfer_accuracy = self.transfer_accuracy == 0
log('[Testing] original accuracy=%g' % (numpy.sum(self.original_accuracy)/float(self.original_accuracy.shape[0])))
log('[Testing] transfer accuracy=%g' % (numpy.sum(self.transfer_accuracy)/float(self.transfer_accuracy.shape[0])))
log('[Testing] accuracy difference=%g' % (numpy.sum(self.transfer_accuracy != self.original_accuracy)/float(self.transfer_accuracy.shape[0])))
log('[Testing] accuracy difference on %d samples=%g' % (self.N_samples, numpy.sum(self.transfer_accuracy[:self.N_samples] != self.original_accuracy[:self.N_samples])/float(self.N_samples)))
self.transfer_accuracy = numpy.logical_and(self.transfer_accuracy, self.original_accuracy)
utils.makedir(os.path.dirname(self.args.transfer_accuracy_file))
utils.write_hdf5(self.args.transfer_accuracy_file, self.transfer_accuracy)
log('[Testing] wrote %s' % self.args.transfer_accuracy_file)
def load_models(self):
"""
Load models.
"""
N_class = numpy.max(self.test_codes) + 1
network_units = list(map(int, self.args.network_units.split(',')))
log('[Testing] using %d input channels' % self.test_images.shape[3])
self.model = models.Classifier(N_class, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
architecture=self.args.network_architecture,
activation=self.args.network_activation,
batch_normalization=not self.args.network_no_batch_normalization,
start_channels=self.args.network_channels,
dropout=self.args.network_dropout,
units=network_units)
assert os.path.exists(self.args.classifier_file), 'state file %s not found' % self.args.classifier_file
state = State.load(self.args.classifier_file)
log('[Testing] read %s' % self.args.classifier_file)
self.model.load_state_dict(state.model)
if self.args.use_gpu and not cuda.is_cuda(self.model):
log('[Testing] classifier is not CUDA')
self.model = self.model.cuda()
log('[Testing] loaded classifier')
# !
self.model.eval()
log('[Testing] set classifier to eval')
def load_data(self):
"""
Load data.
"""
self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
if len(self.test_images.shape) < 4:
self.test_images = numpy.expand_dims(self.test_images, axis=3)
log('[Testing] read %s' % self.args.test_images_file)
self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.int)
self.test_codes = self.test_codes[:, self.args.label_index]
log('[Testing] read %s' % self.args.test_codes_file)
self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
self.N_attempts = self.perturbations.shape[0]
self.N_samples = self.perturbations.shape[1]
self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
if len(self.perturbations.shape) <= 4:
self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3], 1))
else:
self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3], self.perturbations.shape[4]))
log('[Testing] read %s' % self.args.perturbations_file)
self.original_success = utils.read_hdf5(self.args.original_success_file)
self.original_success = numpy.swapaxes(self.original_success, 0, 1)
self.original_success = self.original_success.reshape((self.original_success.shape[0] * self.original_success.shape[1]))
log('[Testing] read %s' % self.args.original_success_file)
self.original_accuracy = utils.read_hdf5(self.args.original_accuracy_file)
log('[Testing] read %s' % self.args.original_accuracy_file)
self.perturbation_codes = numpy.repeat(self.test_codes[:self.N_samples], self.N_attempts, axis=0)
self.transfer_success = numpy.copy(self.original_success)
def main(self):
"""
Main.
"""
self.load_data()
self.load_models()
self.test()
if __name__ == '__main__':
program = TestPerturbations()
program.main()
|
py | b410a4222109e8d96db6e8c128edf83ba7bda8ca | from uctypes import BIG_ENDIAN, addressof, sizeof, struct
def new_struct(layout, data=None):
if data is None:
data = bytes(sizeof(layout, BIG_ENDIAN))
s = struct(addressof(data), layout, BIG_ENDIAN)
return s, data
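# Rough usage sketch for new_struct (the layout below is an assumption for
# illustration, not something defined in this module): a big-endian 16-bit field
# could be declared as {"length": 0 | uctypes.UINT16}, after which
# s, data = new_struct(layout) gives a struct view s whose fields (e.g. s.length)
# are decoded big-endian from the backing buffer data.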
async def recv_struct(reader, layout):
data = await reader.readexactly(sizeof(layout, BIG_ENDIAN))
return new_struct(layout, data)
def encode_int(i, s=2):
return i.to_bytes(s, "big")
def decode_int(data):
mv = memoryview(data)
i = int.from_bytes(mv[:2], "big")
return i, mv[2:]
def encode_varlen_int(i):
parts = []
while i > 0:
byte = i % 128
i = i // 128
if i > 0:
byte = byte | 128
parts.append(byte)
return bytes(parts)
async def recv_varlen_int(reader):
multiplier = 1
value = 0
while True:
byte = await reader.readexactly(1)
byte = int.from_bytes(byte, "big")
value += (byte & 127) * multiplier
if byte & 128 == 0:
return value
multiplier *= 128
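# Worked example of the MQTT-style variable-length integer scheme above:
# encode_varlen_int(200) emits 200 % 128 = 72 with the continuation bit set
# (72 | 128 = 0xC8), then 200 // 128 = 1, i.e. b'\xc8\x01'; recv_varlen_int
# reading those two bytes reconstructs 200.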
def encode_str(s):
if type(s) == str:
s = s.encode()
return encode_int(len(s)) + s
def decode_str(data):
mv = memoryview(data)
str_len, mv = decode_int(mv)
if str_len == 1:
s = bytes([mv[0]])
else:
s = bytes(mv[:str_len])
return s, mv[str_len:]
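# Worked example of the length-prefixed string framing above: encode_str("abc")
# yields the 2-byte big-endian length b'\x00\x03' followed by b'abc', and
# decode_str(b'\x00\x03abc') returns (b'abc', <empty remainder>).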
|
py | b410a4870ece4a8aabb3af9945ee8e38ff8d22fa | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Python version specifics.
This abstracts the Python version decisions. This makes decisions based on
the numbers, and attempts to give them meaningful names. Where possible it
should attempt to make run time detections.
"""
import __future__
import os
import re
import sys
from nuitka.__past__ import WindowsError
def getSupportedPythonVersions():
"""Officially supported Python versions for Nuitka."""
return ("2.6", "2.7", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9")
def getPartiallySupportedPythonVersions():
"""Partially supported Python versions for Nuitka."""
return ()
def getSupportedPythonVersionStr():
supported_python_versions = getSupportedPythonVersions()
supported_python_versions_str = repr(supported_python_versions)[1:-1]
supported_python_versions_str = re.sub(
r"(.*),(.*)$", r"\1, or\2", supported_python_versions_str
)
return supported_python_versions_str
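# For the tuple above this produces:
# "'2.6', '2.7', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', or '3.9'"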
def _getPythonVersion():
big, major, minor = sys.version_info[0:3]
# TODO: Give up on decimal versions already.
return big * 256 + major * 16 + min(15, minor)
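# Example of the encoding above: CPython 3.9.7 maps to 3 * 256 + 9 * 16 + 7 = 919,
# i.e. 0x397, which is why version checks in this module compare against hex
# literals such as 0x300 or 0x353.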
python_version = _getPythonVersion()
python_version_full_str = ".".join(str(s) for s in sys.version_info[0:3])
python_version_str = ".".join(str(s) for s in sys.version_info[0:2])
def isNuitkaPython():
"""Is this our own fork of CPython named Nuitka-Python."""
if python_version >= 0x300:
return sys.implementation.name == "nuitkapython"
else:
return sys.subversion[0] == "nuitkapython"
def getErrorMessageExecWithNestedFunction():
"""Error message of the concrete Python in case an exec occurs in a
function that takes a closure variable.
"""
assert python_version < 0x300
# Need to use "exec" to detect the syntax error, pylint: disable=W0122
try:
exec(
"""
def f():
exec ""
def nested():
return closure"""
)
except SyntaxError as e:
return e.message.replace("'f'", "'%s'")
def getComplexCallSequenceErrorTemplate():
if not hasattr(getComplexCallSequenceErrorTemplate, "result"):
try:
# We are doing this on purpose, to get the exception.
# pylint: disable=not-an-iterable,not-callable
f = None
f(*None)
except TypeError as e:
result = (
e.args[0]
.replace("NoneType object", "%s")
.replace("NoneType", "%s")
.replace("None ", "%s ")
)
getComplexCallSequenceErrorTemplate.result = result
else:
sys.exit("Error, cannot detect expected error message.")
return getComplexCallSequenceErrorTemplate.result
_needs_set_literal_reverse_insertion = None
def needsSetLiteralReverseInsertion():
"""For Python3, until Python3.5 ca. the order of set literals was reversed."""
# Cached result, pylint: disable=global-statement
global _needs_set_literal_reverse_insertion
if _needs_set_literal_reverse_insertion is None:
try:
value = eval("{1,1.0}.pop()") # pylint: disable=eval-used
except SyntaxError:
_needs_set_literal_reverse_insertion = False
else:
_needs_set_literal_reverse_insertion = type(value) is float
return _needs_set_literal_reverse_insertion
def needsDuplicateArgumentColOffset():
if python_version < 0x353:
return False
else:
return True
def isDebianPackagePython():
"""Is this Python from a debian package."""
if python_version < 0x300:
return hasattr(sys, "_multiarch")
else:
try:
from distutils.dir_util import _multiarch
except ImportError:
return False
else:
return True
def isUninstalledPython():
# Debian package.
if isDebianPackagePython():
return False
if isStaticallyLinkedPython():
return True
if os.name == "nt":
import ctypes.wintypes
GetSystemDirectory = ctypes.windll.kernel32.GetSystemDirectoryW
GetSystemDirectory.argtypes = (ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD)
GetSystemDirectory.restype = ctypes.wintypes.DWORD
MAX_PATH = 4096
buf = ctypes.create_unicode_buffer(MAX_PATH)
res = GetSystemDirectory(buf, MAX_PATH)
assert res != 0
system_path = os.path.normcase(buf.value)
return not getRunningPythonDLLPath().startswith(system_path)
return (
os.path.exists(os.path.join(sys.prefix, "conda-meta"))
or "WinPython" in sys.version
)
def getRunningPythonDLLPath():
import ctypes.wintypes
MAX_PATH = 4096
buf = ctypes.create_unicode_buffer(MAX_PATH)
GetModuleFileName = ctypes.windll.kernel32.GetModuleFileNameW
GetModuleFileName.argtypes = (
ctypes.wintypes.HANDLE,
ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD,
)
GetModuleFileName.restype = ctypes.wintypes.DWORD
# We trust ctypes internals here, pylint: disable=protected-access
res = GetModuleFileName(ctypes.pythonapi._handle, buf, MAX_PATH)
if res == 0:
# Windows only code, pylint: disable=I0021,undefined-variable
raise WindowsError(
ctypes.GetLastError(), ctypes.FormatError(ctypes.GetLastError())
)
dll_path = os.path.normcase(buf.value)
assert os.path.exists(dll_path), dll_path
return dll_path
def getTargetPythonDLLPath():
dll_path = getRunningPythonDLLPath()
from nuitka.Options import isPythonDebug
if dll_path.endswith("_d.dll"):
if not isPythonDebug():
dll_path = dll_path[:-6] + ".dll"
if not os.path.exists(dll_path):
sys.exit("Error, cannot switch to non-debug Python, not installed.")
else:
if isPythonDebug():
dll_path = dll_path[:-4] + "_d.dll"
if not os.path.exists(dll_path):
sys.exit("Error, cannot switch to debug Python, not installed.")
return dll_path
def isStaticallyLinkedPython():
# On Windows, there is no way to detect this from sysconfig.
if os.name == "nt":
import ctypes
return ctypes.pythonapi is None
try:
import sysconfig
except ImportError:
# Cannot detect this properly for Python 2.6, but we don't care much
# about that anyway.
return False
result = sysconfig.get_config_var("Py_ENABLE_SHARED") == 0
return result
def getPythonABI():
if hasattr(sys, "abiflags"):
abiflags = sys.abiflags
# Cyclic dependency here.
from nuitka.Options import isPythonDebug
if isPythonDebug() or hasattr(sys, "getobjects"):
if not abiflags.startswith("d"):
abiflags = "d" + abiflags
else:
abiflags = ""
return abiflags
_the_sys_prefix = None
def getSystemPrefixPath():
"""Return real sys.prefix as an absolute path breaking out of virtualenv.
Note:
For Nuitka, it often is OK to break out of the virtualenv, and use the
original install. Mind you, this is not about executing anything, this is
about building, and finding the headers to compile against that Python, we
do not care about any site packages, and so on.
Returns:
str - path to system prefix
"""
global _the_sys_prefix # Cached result, pylint: disable=global-statement
if _the_sys_prefix is None:
sys_prefix = getattr(
sys, "real_prefix", getattr(sys, "base_prefix", sys.prefix)
)
sys_prefix = os.path.abspath(sys_prefix)
# Some virtualenv contain the "orig-prefix.txt" as a textual link to the
# target, this is often on Windows with virtualenv. There are two places to
# look for.
for candidate in (
"Lib/orig-prefix.txt",
"lib/python%s/orig-prefix.txt" % python_version_str,
):
candidate = os.path.join(sys_prefix, candidate)
if os.path.exists(candidate):
with open(candidate) as f:
sys_prefix = f.read()
# Trailing spaces in the python prefix, please not.
assert sys_prefix == sys_prefix.strip()
# This is another form of virtualenv reference:
if os.name != "nt" and os.path.islink(os.path.join(sys_prefix, ".Python")):
sys_prefix = os.path.normpath(
os.path.join(os.readlink(os.path.join(sys_prefix, ".Python")), "..")
)
# Some virtualenv created by "venv" seem to have a different structure, where
# library and include files are outside of it.
if (
os.name != "nt"
and python_version >= 0x330
and os.path.exists(os.path.join(sys_prefix, "bin/activate"))
):
python_binary = os.path.join(sys_prefix, "bin", "python")
python_binary = os.path.realpath(python_binary)
sys_prefix = os.path.normpath(os.path.join(python_binary, "..", ".."))
_the_sys_prefix = sys_prefix
return _the_sys_prefix
def getFutureModuleKeys():
result = [
"unicode_literals",
"absolute_import",
"division",
"print_function",
"generator_stop",
"nested_scopes",
"generators",
"with_statement",
]
if hasattr(__future__, "barry_as_FLUFL"):
result.append("barry_as_FLUFL")
if hasattr(__future__, "annotations"):
result.append("annotations")
return result
def getImportlibSubPackages():
result = []
if python_version >= 0x270:
import importlib
import pkgutil
for module_info in pkgutil.walk_packages(importlib.__path__):
result.append(module_info[1])
return result
|
py | b410a4a4aa4bee84e55df1fa7589dc35bf9f835c | from zycelium.ansiformat import AnsiFormat
def test_no_formatting():
af = AnsiFormat()
astr = af("hello")
assert "'hello'" == astr.raw
def test_no_color():
af = AnsiFormat(term_colors=0)
astr = af("hello")
assert isinstance(astr, str)
assert r"'hello'" == astr.raw
def test_foreground_color_ansi8():
af = AnsiFormat(term_colors=8)
astr = af("hello").fg("red")
assert r"'\x1b[38;5;1mhello\x1b[0m'" == astr.raw
def test_foreground_color_ansi8_via_property():
af = AnsiFormat(term_colors=8)
astr = af("hello").fg_red
assert r"'\x1b[38;5;1mhello\x1b[0m'" == astr.raw
def test_background_color_ansi8_via_property():
af = AnsiFormat(term_colors=8)
astr = af("hello").bg_red
assert r"'\x1b[48;5;1mhello\x1b[0m'" == astr.raw
def test_effect_bold_ansi8_via_property():
af = AnsiFormat(term_colors=8)
astr = af("hello").ef_bold
assert r"'\x1b[1mhello\x1b[0m'" == astr.raw
def test_effect_bold_ansi8_via_shortcut():
af = AnsiFormat(term_colors=8)
astr = af("hello").b
assert r"'\x1b[1mhello\x1b[0m'" == astr.raw
|
py | b410a4ee54559fd66b11ee7528c7f389b28a6760 | # -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Posix reactor base class
"""
import warnings
import socket
import errno
import os
from zope.interface import implements, classImplements
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorUNIX, IReactorUNIXDatagram
from twisted.internet.interfaces import IReactorTCP, IReactorUDP, IReactorSSL, IReactorArbitrary
from twisted.internet.interfaces import IReactorProcess, IReactorMulticast
from twisted.internet.interfaces import IHalfCloseableDescriptor
from twisted.internet import error
from twisted.internet import tcp, udp
from twisted.python import log, failure, util
from twisted.persisted import styles
from twisted.python.runtime import platformType, platform
from twisted.internet.base import ReactorBase, _SignalReactorMixin
try:
from twisted.internet import ssl
sslEnabled = True
except ImportError:
sslEnabled = False
try:
from twisted.internet import unix
unixEnabled = True
except ImportError:
unixEnabled = False
processEnabled = False
if platformType == 'posix':
from twisted.internet import fdesc
import process
processEnabled = True
if platform.isWindows():
try:
import win32process
processEnabled = True
except ImportError:
win32process = None
class _SocketWaker(log.Logger, styles.Ephemeral):
"""
The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, implemented
using a pair of sockets rather than pipes (due to the lack of support in
select() on Windows for pipes), used to wake up the main loop from
another thread.
"""
disconnected = 0
def __init__(self, reactor):
"""Initialize.
"""
self.reactor = reactor
# Following select_trigger (from asyncore)'s example;
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
server.bind(('127.0.0.1', 0))
server.listen(1)
client.connect(server.getsockname())
reader, clientaddr = server.accept()
client.setblocking(0)
reader.setblocking(0)
self.r = reader
self.w = client
self.fileno = self.r.fileno
def wakeUp(self):
"""Send a byte to my connection.
"""
try:
util.untilConcludes(self.w.send, 'x')
except socket.error, (err, msg):
if err != errno.WSAEWOULDBLOCK:
raise
def doRead(self):
"""Read some data from my connection.
"""
try:
self.r.recv(8192)
except socket.error:
pass
def connectionLost(self, reason):
self.r.close()
self.w.close()
class _PipeWaker(log.Logger, styles.Ephemeral):
"""
The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, used to wake
up the main loop from another thread or a signal handler.
"""
disconnected = 0
i = None
o = None
def __init__(self, reactor):
"""Initialize.
"""
self.reactor = reactor
self.i, self.o = os.pipe()
fdesc.setNonBlocking(self.i)
fdesc._setCloseOnExec(self.i)
fdesc.setNonBlocking(self.o)
fdesc._setCloseOnExec(self.o)
self.fileno = lambda: self.i
def doRead(self):
"""Read some bytes from the pipe.
"""
fdesc.readFromFD(self.fileno(), lambda data: None)
def wakeUp(self):
"""Write one byte to the pipe, and flush it.
"""
# We don't use fdesc.writeToFD since we need to distinguish
# between EINTR (try again) and EAGAIN (do nothing).
if self.o is not None:
try:
util.untilConcludes(os.write, self.o, 'x')
except OSError, e:
if e.errno != errno.EAGAIN:
raise
def connectionLost(self, reason):
"""Close both ends of my pipe.
"""
if not hasattr(self, "o"):
return
for fd in self.i, self.o:
try:
os.close(fd)
except IOError:
pass
del self.i, self.o
if platformType == 'posix':
_Waker = _PipeWaker
else:
# Primarily Windows and Jython.
_Waker = _SocketWaker
class PosixReactorBase(_SignalReactorMixin, ReactorBase):
"""
A basis for reactors that use file descriptors.
"""
implements(IReactorArbitrary, IReactorTCP, IReactorUDP, IReactorMulticast)
def __init__(self):
ReactorBase.__init__(self)
if self.usingThreads or platformType == "posix":
self.installWaker()
def _disconnectSelectable(self, selectable, why, isRead, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost())
}):
"""
Utility function for disconnecting a selectable.
Supports half-close notification, isRead should be boolean indicating
whether error resulted from doRead().
"""
self.removeReader(selectable)
f = faildict.get(why.__class__)
if f:
if (isRead and why.__class__ == error.ConnectionDone
and IHalfCloseableDescriptor.providedBy(selectable)):
selectable.readConnectionLost(f)
else:
self.removeWriter(selectable)
selectable.connectionLost(f)
else:
self.removeWriter(selectable)
selectable.connectionLost(failure.Failure(why))
def installWaker(self):
"""
Install a `waker' to allow threads and signals to wake up the IO thread.
We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
the reactor. On Windows we use a pair of sockets.
"""
if not self.waker:
self.waker = _Waker(self)
self._internalReaders.add(self.waker)
self.addReader(self.waker)
# IReactorProcess
def spawnProcess(self, processProtocol, executable, args=(),
env={}, path=None,
uid=None, gid=None, usePTY=0, childFDs=None):
args, env = self._checkProcessArgs(args, env)
if platformType == 'posix':
if usePTY:
if childFDs is not None:
raise ValueError("Using childFDs is not supported with usePTY=True.")
return process.PTYProcess(self, executable, args, env, path,
processProtocol, uid, gid, usePTY)
else:
return process.Process(self, executable, args, env, path,
processProtocol, uid, gid, childFDs)
elif platformType == "win32":
if uid is not None or gid is not None:
raise ValueError("The uid and gid parameters are not supported on Windows.")
if usePTY:
raise ValueError("The usePTY parameter is not supported on Windows.")
if childFDs:
raise ValueError("Customizing childFDs is not supported on Windows.")
if win32process:
from twisted.internet._dumbwin32proc import Process
return Process(self, processProtocol, executable, args, env, path)
else:
raise NotImplementedError, "spawnProcess not available since pywin32 is not installed."
else:
raise NotImplementedError, "spawnProcess only available on Windows or POSIX."
# IReactorUDP
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
"""Connects a given L{DatagramProtocol} to the given numeric UDP port.
@returns: object conforming to L{IListeningPort}.
"""
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
return p
# IReactorMulticast
def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192, listenMultiple=False):
"""Connects a given DatagramProtocol to the given numeric UDP port.
EXPERIMENTAL.
@returns: object conforming to IListeningPort.
"""
p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self, listenMultiple)
p.startListening()
return p
# IReactorUNIX
def connectUNIX(self, address, factory, timeout=30, checkPID=0):
"""@see: twisted.internet.interfaces.IReactorUNIX.connectUNIX
"""
assert unixEnabled, "UNIX support is not present"
c = unix.Connector(address, factory, timeout, self, checkPID)
c.connect()
return c
_unspecified = object()
def _checkMode(self, name, mode):
"""
Check C{mode} to see if a value was specified for it and emit a
deprecation warning if so. Return the default value if none was
specified, otherwise return C{mode}.
"""
if mode is not self._unspecified:
warnings.warn(
'The mode parameter of %(name)s will be removed. Do not pass '
'a value for it. Set permissions on the containing directory '
'before calling %(name)s, instead.' % dict(name=name),
category=DeprecationWarning,
stacklevel=3)
else:
mode = 0666
return mode
def listenUNIX(self, address, factory, backlog=50, mode=_unspecified,
wantPID=0):
"""
@see: twisted.internet.interfaces.IReactorUNIX.listenUNIX
"""
assert unixEnabled, "UNIX support is not present"
mode = self._checkMode('IReactorUNIX.listenUNIX', mode)
p = unix.Port(address, factory, backlog, mode, self, wantPID)
p.startListening()
return p
# IReactorUNIXDatagram
def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192,
mode=_unspecified):
"""
Connects a given L{DatagramProtocol} to the given path.
EXPERIMENTAL.
@returns: object conforming to L{IListeningPort}.
"""
assert unixEnabled, "UNIX support is not present"
mode = self._checkMode('IReactorUNIXDatagram.listenUNIXDatagram', mode)
p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
p.startListening()
return p
def connectUNIXDatagram(self, address, protocol, maxPacketSize=8192,
mode=_unspecified, bindAddress=None):
"""
Connects a L{ConnectedDatagramProtocol} instance to a path.
EXPERIMENTAL.
"""
assert unixEnabled, "UNIX support is not present"
mode = self._checkMode('IReactorUNIXDatagram.connectUNIXDatagram', mode)
p = unix.ConnectedDatagramPort(address, protocol, maxPacketSize, mode, bindAddress, self)
p.startListening()
return p
# IReactorTCP
def listenTCP(self, port, factory, backlog=50, interface=''):
"""@see: twisted.internet.interfaces.IReactorTCP.listenTCP
"""
p = tcp.Port(port, factory, backlog, interface, self)
p.startListening()
return p
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""@see: twisted.internet.interfaces.IReactorTCP.connectTCP
"""
c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
c.connect()
return c
# IReactorSSL (sometimes, not implemented)
def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
"""@see: twisted.internet.interfaces.IReactorSSL.connectSSL
"""
assert sslEnabled, "SSL support is not present"
c = ssl.Connector(host, port, factory, contextFactory, timeout, bindAddress, self)
c.connect()
return c
def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
"""@see: twisted.internet.interfaces.IReactorSSL.listenSSL
"""
assert sslEnabled, "SSL support is not present"
p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
p.startListening()
return p
# IReactorArbitrary
def listenWith(self, portType, *args, **kw):
kw['reactor'] = self
p = portType(*args, **kw)
p.startListening()
return p
def connectWith(self, connectorType, *args, **kw):
kw['reactor'] = self
c = connectorType(*args, **kw)
c.connect()
return c
def _removeAll(self, readers, writers):
"""
Remove all readers and writers, and list of removed L{IReadDescriptor}s
and L{IWriteDescriptor}s.
Meant for calling from subclasses, to implement removeAll, like::
def removeAll(self):
return self._removeAll(self._reads, self._writes)
where C{self._reads} and C{self._writes} are iterables.
"""
removedReaders = set(readers) - self._internalReaders
for reader in removedReaders:
self.removeReader(reader)
removedWriters = set(writers)
for writer in removedWriters:
self.removeWriter(writer)
return list(removedReaders | removedWriters)
if sslEnabled:
classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
classImplements(PosixReactorBase, IReactorProcess)
__all__ = ["PosixReactorBase"]
|
py | b410a641eac666b51ea0f711361d33ed2dcdf7eb | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers import SavitzkyGolayFilter
from tests.utils import skip_if_no_cuda
# Zero-padding trivial tests
TEST_CASE_SINGLE_VALUE = [
{"window_length": 3, "order": 1},
torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0), # Input data: Single value
torch.Tensor([1 / 3]).unsqueeze(0).unsqueeze(0), # Expected output: With a window length of 3 and polyorder 1
# output should be equal to mean of 0, 1 and 0 = 1/3 (because input will be zero-padded and a linear fit performed)
1e-15, # absolute tolerance
]
TEST_CASE_1D = [
{"window_length": 3, "order": 1},
torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0), # Input data
torch.Tensor([2 / 3, 1.0, 2 / 3])
.unsqueeze(0)
.unsqueeze(0), # Expected output: zero padded, so linear interpolation
# over length-3 windows will result in output of [2/3, 1, 2/3].
1e-15, # absolute tolerance
]
TEST_CASE_2D_AXIS_2 = [
{"window_length": 3, "order": 1}, # along default axis (2, first spatial dim)
torch.ones((3, 2)).unsqueeze(0).unsqueeze(0),
torch.Tensor([[2 / 3, 2 / 3], [1.0, 1.0], [2 / 3, 2 / 3]]).unsqueeze(0).unsqueeze(0),
1e-15, # absolute tolerance
]
TEST_CASE_2D_AXIS_3 = [
{"window_length": 3, "order": 1, "axis": 3}, # along axis 3 (second spatial dim)
torch.ones((2, 3)).unsqueeze(0).unsqueeze(0),
torch.Tensor([[2 / 3, 1.0, 2 / 3], [2 / 3, 1.0, 2 / 3]]).unsqueeze(0).unsqueeze(0),
1e-15, # absolute tolerance
]
# Replicated-padding trivial tests
TEST_CASE_SINGLE_VALUE_REP = [
{"window_length": 3, "order": 1, "mode": "replicate"},
torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0), # Input data: Single value
torch.Tensor([1.0]).unsqueeze(0).unsqueeze(0), # Expected output: With a window length of 3 and polyorder 1
# output will be equal to mean of [1, 1, 1] = 1 (input will be nearest-neighbour-padded and a linear fit performed)
1e-15, # absolute tolerance
]
TEST_CASE_1D_REP = [
{"window_length": 3, "order": 1, "mode": "replicate"},
torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0), # Input data
torch.Tensor([1.0, 1.0, 1.0]).unsqueeze(0).unsqueeze(0), # Expected output: replicate padded, so linear interpolation
# over length-3 windows will result in output of [1.0, 1.0, 1.0].
1e-15, # absolute tolerance
]
TEST_CASE_2D_AXIS_2_REP = [
{"window_length": 3, "order": 1, "mode": "replicate"}, # along default axis (2, first spatial dim)
torch.ones((3, 2)).unsqueeze(0).unsqueeze(0),
torch.Tensor([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]).unsqueeze(0).unsqueeze(0),
1e-15, # absolute tolerance
]
TEST_CASE_2D_AXIS_3_REP = [
{"window_length": 3, "order": 1, "axis": 3, "mode": "replicate"}, # along axis 3 (second spatial dim)
torch.ones((2, 3)).unsqueeze(0).unsqueeze(0),
torch.Tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]).unsqueeze(0).unsqueeze(0),
1e-15, # absolute tolerance
]
# Sine smoothing
TEST_CASE_SINE_SMOOTH = [
{"window_length": 3, "order": 1},
# Sine wave with period equal to savgol window length (windowed to reduce edge effects).
torch.as_tensor(np.sin(2 * np.pi * 1 / 3 * np.arange(100)) * np.hanning(100)).unsqueeze(0).unsqueeze(0),
# Should be smoothed out to zeros
torch.zeros(100).unsqueeze(0).unsqueeze(0),
# tolerance chosen by examining output of SciPy.signal.savgol_filter when provided the above input
2e-2, # absolute tolerance
]
class TestSavitzkyGolayCPU(unittest.TestCase):
@parameterized.expand(
[TEST_CASE_SINGLE_VALUE, TEST_CASE_1D, TEST_CASE_2D_AXIS_2, TEST_CASE_2D_AXIS_3, TEST_CASE_SINE_SMOOTH]
)
def test_value(self, arguments, image, expected_data, atol):
result = SavitzkyGolayFilter(**arguments)(image)
np.testing.assert_allclose(result, expected_data, atol=atol)
class TestSavitzkyGolayCPUREP(unittest.TestCase):
@parameterized.expand(
[TEST_CASE_SINGLE_VALUE_REP, TEST_CASE_1D_REP, TEST_CASE_2D_AXIS_2_REP, TEST_CASE_2D_AXIS_3_REP]
)
def test_value(self, arguments, image, expected_data, atol):
result = SavitzkyGolayFilter(**arguments)(image)
np.testing.assert_allclose(result, expected_data, atol=atol)
@skip_if_no_cuda
class TestSavitzkyGolayGPU(unittest.TestCase):
@parameterized.expand(
[TEST_CASE_SINGLE_VALUE, TEST_CASE_1D, TEST_CASE_2D_AXIS_2, TEST_CASE_2D_AXIS_3, TEST_CASE_SINE_SMOOTH]
)
def test_value(self, arguments, image, expected_data, atol):
result = SavitzkyGolayFilter(**arguments)(image.to(device="cuda"))
np.testing.assert_allclose(result.cpu(), expected_data, atol=atol)
@skip_if_no_cuda
class TestSavitzkyGolayGPUREP(unittest.TestCase):
@parameterized.expand(
[TEST_CASE_SINGLE_VALUE_REP, TEST_CASE_1D_REP, TEST_CASE_2D_AXIS_2_REP, TEST_CASE_2D_AXIS_3_REP]
)
def test_value(self, arguments, image, expected_data, atol):
result = SavitzkyGolayFilter(**arguments)(image.to(device="cuda"))
np.testing.assert_allclose(result.cpu(), expected_data, atol=atol)
if __name__ == "__main__":
unittest.main()
|
py | b410a65d7c3af0eaafbe11dd5157333274fa1349 | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2020 all rights reserved
#
class Leaf:
"""
Mix-in class that provides an implementation of the subset of the interface of {Node} that
requires traversals of the expression graph rooted at leaf nodes.
"""
# interface
@property
def span(self):
"""
Traverse my subgraph and yield all its nodes
"""
# just myself
yield self
# and nothing else
return
# end of file
|
py | b410a7701351e36198d6eba264b47f33549080ac | ## Dependencies
from accelerate import Accelerator
import accelerate
import pytesseract
import torchmetrics
import math
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
from PIL import Image
import json
import numpy as np
from tqdm.auto import tqdm
from torchvision.transforms import ToTensor
import torch.nn.functional as F
import torch.nn as nn
import torchvision.models as models
from einops import rearrange
from einops import rearrange as rearr
from sklearn.model_selection import train_test_split as tts
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import ToTensor
from modeling import DocFormer
batch_size = 9
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
@property
def avg(self):
return (self.sum / self.count) if self.count>0 else 0
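# Illustrative use of AverageMeter (numbers are made up): after
#     meter = AverageMeter(); meter.update(0.5, n=32); meter.update(0.7, n=32)
# meter.avg is (0.5*32 + 0.7*32) / 64 = 0.6, the sample-weighted running mean
# used below to aggregate per-batch loss and accuracy.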
## Loggers
class Logger:
def __init__(self, filename, format='csv'):
self.filename = filename + '.' + format
self._log = []
self.format = format
def save(self, log, epoch=None):
log['epoch'] = epoch + 1
self._log.append(log)
if self.format == 'json':
with open(self.filename, 'w') as f:
json.dump(self._log, f)
else:
pd.DataFrame(self._log).to_csv(self.filename, index=False)
def train_fn(data_loader, model, criterion, optimizer, epoch, device, scheduler=None):
model.train()
accelerator = Accelerator()
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
log = None
train_acc = torchmetrics.Accuracy()
loop = tqdm(data_loader)
for batch in loop:
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
labels = batch["mlm_labels"].to(device)
# process
outputs = model(batch)
ce_loss = criterion(outputs.transpose(1,2), labels)
if log is None:
log = {}
log["ce_loss"] = AverageMeter()
log['accuracy'] = AverageMeter()
optimizer.zero_grad()
accelerator.backward(ce_loss)
optimizer.step()
if scheduler is not None:
scheduler.step()
log['accuracy'].update(train_acc(labels.cpu(),torch.argmax(outputs,-1).cpu()).item(),batch_size)
log['ce_loss'].update(ce_loss.item())
loop.set_postfix({k: v.avg for k, v in log.items()})
return log
# Function for the validation data loader
def eval_fn(data_loader, model, criterion, device):
model.eval()
log = None
val_acc = torchmetrics.Accuracy()
with torch.no_grad():
loop = tqdm(data_loader, total=len(data_loader), leave=True)
for batch in loop:
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
labels = batch["mlm_labels"].to(device)
output = model(batch)
ce_loss = criterion(output.transpose(1,2), labels)
if log is None:
log = {}
log["ce_loss"] = AverageMeter()
log['accuracy'] = AverageMeter()
log['accuracy'].update(val_acc(labels.cpu(),torch.argmax(output,-1).cpu()).item(),batch_size)
log['ce_loss'].update(ce_loss.item())
loop.set_postfix({k: v.avg for k, v in log.items()})
return log # ['total_loss']
date = '20Oct'
def run(config,train_dataloader,val_dataloader,device,epochs,path,classes,lr = 5e-5):
logger = Logger(f"{path}/logs")
model = DocFormerForClassification(config,classes).to(device)
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
best_val_loss = 1e9
header_printed = False
batch_size = config['batch_size']
for epoch in range(epochs):
print("Training the model.....")
train_log = train_fn(
train_dataloader, model, criterion, optimizer, epoch, device, scheduler=None
)
print("Validating the model.....")
valid_log = eval_fn(val_dataloader, model, criterion, device)
log = {k: v.avg for k, v in train_log.items()}
log.update({"V/" + k: v.avg for k, v in valid_log.items()})
logger.save(log, epoch)
keys = sorted(log.keys())
if not header_printed:
print(" ".join(map(lambda k: f"{k[:8]:8}", keys)))
header_printed = True
print(" ".join(map(lambda k: f"{log[k]:8.3f}"[:8], keys)))
if log["V/ce_loss"] < best_val_loss:
best_val_loss = log["V/ce_loss"]
print("Best model found at epoch {}".format(epoch + 1))
torch.save(model.state_dict(), f"{path}/docformer_best_{epoch}_{date}.pth")
|
py | b410a809b0a1fc65d68b9039686c2e5d282f3484 | ## import pricing functions
import numpy as np
from monte_carlo import sim_gbm_paths
from pricing.vanilla.binomial_tree import binomial_tree_bs
from pricing.vanilla.finite_differences import fidi_bs_eu, fidi_bs_american
from pricing.vanilla.integration import closed_form_bs_eu, laplace_heston_eu, fast_fourier_bs_eu, fast_fourier_heston_eu
from pricing.vanilla.monte_carlo import monte_carlo_bs_eu, monte_carlo_bs_am
## Binomial tree: Pricing via the Binomial tree model of Cox Ross and Rubinstein
spot = 32 # spot price
strike = 32 # strike price
r = 0.02 # annual risk free interest rate
sigma = 0.3 # volatility
mt = 1.4 # maturity time in years
m = 10000 # number of steps, must be an integer
american_exercise = True # American exercise True or False depending on options exercise style American/European style
option_type = "put" # option_type can be "put" or "call"
# Compute:
v0_binomial_tree = binomial_tree_bs(spot, strike, r, sigma, mt, m, option_type, american_exercise)
print(v0_binomial_tree)
## Finite-differences European exercise call
strike = 32 # strike price
r = 0.02 # annual risk free interest rate
sigma = 0.3 # volatility
mt = 1.4 # maturity time in years
# bounds in space
a = -0.5
b = 0.5
m = 500 # number of steps in space, must be an integer
nu_max = 10000 # number of steps in time
# type of finite differences scheme (explicit, implicit or Crank-Nicolson (cn))
scheme = "cn" # possible are "cn", "implicit", "explicit"
# Compute:
[v0_fidi_eu, s0_fidi_eu] = fidi_bs_eu(strike, r, sigma, mt, a, b, m, nu_max, scheme)
spot = [26, 28, 30, 32, 34, 36, 38] # list of spot prices
v0_fidi_eu_interp = np.interp(spot, s0_fidi_eu, v0_fidi_eu) # find the price for different spots via linear interpolation
print(v0_fidi_eu_interp)
## Finite-differences American exercise put
strike = 32 # strike price
r = 0.02 # annual risk free interest rate
sigma = 0.3 # volatility
mt = 1.4 # maturity time in years
# bounds in space
a = -0.5
b = 0.5
m = 1000 # number of steps in space, must be an integer
nu_max = 20000 # number of steps in time
# Compute:
[v0_fidi_am, s0_fidi_am] = fidi_bs_american(strike, r, sigma, mt, a, b, m, nu_max)
spot = [26, 28, 30, 32, 34, 36, 38] # list of spot prices
v0_fidi_am_interp = np.interp(spot, s0_fidi_am, v0_fidi_am) # find the price for different spots via linear interpolation
print(v0_fidi_am_interp)
## black scholes explicit formula
spot = 110 # spot price
strike = 100 # strike price
r = 0.05 # annual risk free interest rate
sigma = 0.2 # volatility
mt = 1 # maturity time in years
option_type = "call" # put or call
# Compute:
vt_bs = closed_form_bs_eu(spot, strike, r, sigma, mt, option_type, t=0)
print(vt_bs)
## heston call/put via laplace transform
spot = 110 # spot price
strike = 100 # strike price
r = 0.05 # annual risk free interest rate
mt = 1 # maturity time in years
# heston volatility dynamics parameters
sigma_tilde = 0.2
nu0 = 0.3**2
kappa = 0.3**2
lamb = 2.5
option_type = "call" # put or call
[vt_heston, abserr] = laplace_heston_eu(spot, strike, r, sigma_tilde, mt, nu0, kappa, lamb, option_type, t=0)
print(vt_heston)
## Pricing via Fast Fourier transform in the black-scholes model
# Note:
# FFT prices simultaneously for a list of strikes
# After FFT pricing, returned values are obtained via linear interpolation
# m/n is the mesh size of the integral approximation via the midpoint rule, m should be large and m/n should be small
# however m/n also effects the gap size between values over which is interpolated and should not be too small
spot = 110
strikes = [95, 100, 105, 110, 115, 120, 125] # a list of strike prices
# strikes = np.arange(90, 180, 0.1) # or a numpy array of strike prices
r = 0.05
sigma = 0.2
mt = 1
option_type = "call"
# Compute:
[vt_fft_bs_interpolated, vt_fft_bs, strikes_fft_bs] = fast_fourier_bs_eu(spot, strikes, r, sigma, mt, option_type, n=10000, m=400, t=0)
print(vt_fft_bs_interpolated)
print(vt_fft_bs)
print(strikes_fft_bs)
### Pricing via Fast Fourier transform in the heston model
# Note:
# FFT prices simultaneously for a list of strikes
# After FFT pricing, returned values are obtained via linear interpolation
# m/n is the mesh size of the integral approximation via the midpoint rule, m should be large and m/n should be small
# m also effects the gap size between values over which is interpolated and should not be too small
spot = 110
strikes = [95, 100, 105, 110, 115, 120, 125] # a list of strike prices
# strikes = np.arange(90, 180, 0.1) # or a numpy array of strike prices
r = 0.05
mt = 1
option_type = "call"
sigma_tilde = 0.2
nu0 = 0.3**2
kappa = 0.3**2
lamb = 2.5
[vt_fft_heston_interpolated, vt_fft_heston, strikes_fft_heston] = fast_fourier_heston_eu(spot, strikes, r, sigma_tilde, mt, nu0, kappa, lamb, option_type, n=10000, m=400, t=0)
print(vt_fft_heston_interpolated)
print(vt_fft_heston)
print(strikes_fft_heston)
## Monte Carlo Black Scholes EU
spot = 110 # spot price
strike = 100 # strike price
r = 0.05 # annual risk free interest rate
sigma = 0.2 # volatility
mt = 1 # maturity time in years
d = 0 # dividend yield, NOTE: only possible without importance sampling
option_type = "put"
antithetic = True
n = 100000
# reference value put: 2.785896190661841
[v0, ci] = monte_carlo_bs_eu(spot, strike, r, d, sigma, mt, n, option_type, antithetic)
print(v0)
print(ci)
# reference value call: 17.66295374059044
option_type = "call"
[v0, ci] = monte_carlo_bs_eu(spot, strike, r, d, sigma, mt, n, option_type, antithetic)
print(v0)
print(ci) # 95% confidence interval
## Monte carlo Black Scholes EU Importance Sampling
spot = 110
strike = 60
r = 0.05
sigma = 0.2
mt = 1
n = 100000
d = 1 # dividend yield, NOTE: must be zero or otherwise will be set to zero if importance sampling is used
antithetic = True
# reference value: 0.002160509264695208
importance_sampling = True
option_type = "put"
[v0, ci] = monte_carlo_bs_eu(spot, strike, r, d, sigma, mt, n, option_type, antithetic, importance_sampling)
print(v0)
print(ci)
spot = 110
strike = 180
# reference value: 0.12896384364721736
option_type = "call"
[v0, ci] = monte_carlo_bs_eu(spot, strike, r, d, sigma, mt, n, option_type, antithetic, importance_sampling)
print(v0)
print(ci)
## Monte Carlo Black Scholes Am using longstaff schwartz
spot = 32 # spot price
strike = 32 # strike price
r = 0.02 # annual risk free interest rate
sigma = 0.3 # volatility
mt = 1.4 # maturity time in years
d = 0 # annual dividend yield
m = 100 # number of equidistant exercise dates
n = 100000 # number of simulated paths
antithetic = True # True if half of the simulated paths should be antithetic
option_type = "put" # put or call
k = 3 # number of basis function to use, for polynomial no limit, for laguerre 7 is the maximum
basis = "laguerre" # polynomial and laguerre basis functions are possible, laguerre basis has
fit_method = "inv" # possible values are 'inv' (usual inverse method), 'qr' (QR-decomposition), 'svd' singular value decomposition
# reference from binomiall tree: 4.1002953921226295
# generate Geometric Brownian Motion paths
paths = sim_gbm_paths(spot, sigma, mt, r, m, n, d, antithetic)
[v0, se] = monte_carlo_bs_am(strike, r, mt, option_type, paths, k, basis, fit_method)
print(v0)
print(se) # the standard error of the Monte Carlo estimate
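# Optional sanity check (sketch, reusing the paths simulated above): the Longstaff-Schwartz
# estimate should be essentially independent of the least-squares fit method, so the 'qr'
# and 'svd' variants should reproduce the 'inv' result up to numerical noise.
for alt_fit_method in ["qr", "svd"]:
    [v0_alt, se_alt] = monte_carlo_bs_am(strike, r, mt, option_type, paths, k, basis, alt_fit_method)
    print(alt_fit_method, v0_alt, se_alt)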
|
py | b410a9b9914735d844515fc513dacc7b220ab6f3 | """
Agent API
This document refers to Symphony API calls to send and receive messages and content. They need the on-premise Agent installed to perform decryption/encryption of content. - sessionToken and keyManagerToken can be obtained by calling the authenticationAPI on the symphony back end and the key manager respectively. Refer to the methods described in authenticatorAPI.yaml. - A new authorizationToken has been introduced in the authenticationAPI response payload. It can be used to replace the sessionToken in any of the API calls and can be passed as \"Authorization\" header. - Actions are defined to be atomic, ie will succeed in their entirety or fail and have changed nothing. - If it returns a 40X status then it will have sent no message to any stream even if a request to some subset of the requested streams would have succeeded. - If this contract cannot be met for any reason then this is an error and the response code will be 50X. - MessageML is a markup language for messages. See reference here: https://rest-api.symphony.com/docs/messagemlv2 - **Real Time Events**: The following events are returned when reading from a real time messages and events stream (\"datafeed\"). These events will be returned for datafeeds created with the v5 endpoints. To know more about the endpoints, refer to Create Messages/Events Stream and Read Messages/Events Stream. Unless otherwise specified, all events were added in 1.46. # noqa: E501
The version of the OpenAPI document: 22.5.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from typing import List, Union
from symphony.bdk.gen.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from symphony.bdk.gen.exceptions import ApiAttributeError
from symphony.bdk.gen.agent_model.v4_stream import V4Stream
from symphony.bdk.gen.agent_model.v4_user import V4User
globals()['V4Stream'] = V4Stream
globals()['V4User'] = V4User
class V4RoomMemberPromotedToOwner(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
        This must be a method because an agent_model may have properties that are
        of type self; this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
        This must be a method because an agent_model may have properties that are
        of type self; this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'stream': (V4Stream, none_type), # noqa: E501
'affected_user': (V4User, none_type), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'stream': 'stream', # noqa: E501
'affected_user': 'affectedUser', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""V4RoomMemberPromotedToOwner - a agent_model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the agent_model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
stream (V4Stream): [optional] # noqa: E501
affected_user (V4User): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""V4RoomMemberPromotedToOwner - a agent_model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the agent_model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
stream (V4Stream): [optional] # noqa: E501
affected_user (V4User): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.stream: V4Stream = None
self.affected_user: V4User = None
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
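# Usage sketch (added example, not part of the generated model): constructing this event
# by hand, e.g. in a unit test for datafeed handling. Attribute names follow attribute_map
# above; the V4Stream/V4User keyword arguments are assumptions about those generated models.
if __name__ == "__main__":
    example_event = V4RoomMemberPromotedToOwner(
        stream=V4Stream(stream_id="example-stream-id"),  # assumed V4Stream attribute
        affected_user=V4User(user_id=123456789),  # assumed V4User attribute
    )
    print(example_event.stream, example_event.affected_user)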
|
py | b410aae910771a7eccd485f20e02d7c77837dda4 | '''OpenGL extension SUN.get_transparent_index
This module customises the behaviour of the
OpenGL.raw.GLX.SUN.get_transparent_index to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SUN/get_transparent_index.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.SUN.get_transparent_index import *
from OpenGL.raw.GLX.SUN.get_transparent_index import _EXTENSION_NAME
def glInitGetTransparentIndexSUN():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
py | b410abc2898a260856c0211ca0938fbc215bf673 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
from typing import Any, Dict, List, Tuple
import torch
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.layers import ROIAlign
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from densepose.structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData
def build_augmentation(cfg, is_train):
logger = logging.getLogger(__name__)
result = utils.build_augmentation(cfg, is_train)
if is_train:
random_rotation = T.RandomRotation(
cfg.INPUT.ROTATION_ANGLES, expand=False, sample_style="choice"
)
result.append(random_rotation)
logger.info("DensePose-specific augmentation used in training: " + str(random_rotation))
return result
class DatasetMapper:
"""
A customized version of `detectron2.data.DatasetMapper`
"""
def __init__(self, cfg, is_train=True):
self.augmentation = build_augmentation(cfg, is_train)
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.mask_on = (
cfg.MODEL.MASK_ON or (
cfg.MODEL.DENSEPOSE_ON
and cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS)
)
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
self.densepose_on = cfg.MODEL.DENSEPOSE_ON
assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet"
# fmt: on
if self.keypoint_on and is_train:
# Flip only makes sense in training
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
else:
self.keypoint_hflip_indices = None
if self.densepose_on:
densepose_transform_srcs = [
MetadataCatalog.get(ds).densepose_transform_src
for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
]
assert len(densepose_transform_srcs) > 0
# TODO: check that DensePose transformation data is the same for
# all the datasets. Otherwise one would have to pass DB ID with
# each entry to select proper transformation data. For now, since
# all DensePose annotated data uses the same data semantics, we
# omit this check.
densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0])
self.densepose_transform_data = DensePoseTransformData.load(
densepose_transform_data_fpath
)
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
image, transforms = T.apply_transform_gens(self.augmentation, image)
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
if not self.is_train:
dataset_dict.pop("annotations", None)
return dataset_dict
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
# USER: Don't call transpose_densepose if you don't need
annos = [
self._transform_densepose(
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
),
transforms,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
if self.mask_on:
self._add_densepose_masks_as_segmentation(annos, image_shape)
instances = utils.annotations_to_instances(annos, image_shape, mask_format="bitmask")
densepose_annotations = [obj.get("densepose") for obj in annos]
if densepose_annotations and any(
v is not None for v in densepose_annotations
):
instances.gt_densepose = DensePoseList(
densepose_annotations, instances.gt_boxes, image_shape
)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict
def _transform_densepose(self, annotation, transforms):
if not self.densepose_on:
return annotation
# Handle densepose annotations
is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation)
if is_valid:
densepose_data = DensePoseDataRelative(annotation, cleanup=True)
densepose_data.apply_transform(transforms, self.densepose_transform_data)
annotation["densepose"] = densepose_data
else:
# logger = logging.getLogger(__name__)
# logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid))
DensePoseDataRelative.cleanup_annotation(annotation)
# NOTE: annotations for certain instances may be unavailable.
            # 'None' is accepted by the DensePoseList data structure.
annotation["densepose"] = None
return annotation
def _add_densepose_masks_as_segmentation(
self, annotations: List[Dict[str, Any]], image_shape_hw: Tuple[int, int]
):
for obj in annotations:
if ("densepose" not in obj) or ("segmentation" in obj):
continue
# DP segmentation: torch.Tensor [S, S] of float32, S=256
segm_dp = torch.zeros_like(obj["densepose"].segm)
segm_dp[obj["densepose"].segm > 0] = 1
segm_h, segm_w = segm_dp.shape
bbox_segm_dp = torch.tensor((0, 0, segm_h - 1, segm_w - 1), dtype=torch.float32)
# image bbox
x0, y0, x1, y1 = (
v.item() for v in BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS)
)
segm_aligned = (
ROIAlign((y1 - y0, x1 - x0), 1.0, 0, aligned=True)
.forward(segm_dp.view(1, 1, *segm_dp.shape), bbox_segm_dp)
.squeeze()
)
image_mask = torch.zeros(*image_shape_hw, dtype=torch.float32)
image_mask[y0:y1, x0:x1] = segm_aligned
# segmentation for BitMask: np.array [H, W] of np.bool
obj["segmentation"] = image_mask >= 0.5
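# Usage sketch (added example, not part of the original file): how this mapper is typically
# plugged into a detectron2 training loader. The densepose config helper and the config file
# path are assumptions about the surrounding project and may need adjusting.
if __name__ == "__main__":
    from detectron2.config import get_cfg
    from detectron2.data import build_detection_train_loader
    from densepose import add_densepose_config  # assumed import from the DensePose project
    cfg = get_cfg()
    add_densepose_config(cfg)
    cfg.merge_from_file("configs/densepose_rcnn_R_50_FPN_s1x.yaml")  # hypothetical config path
    train_loader = build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, is_train=True))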
|
py | b410abcb0659a58bbe55108a68197122ca8c2085 | from django.urls import path
from . import views
app_name = "server"
urlpatterns = [
path('', views.index, name='index'),
path('hosts/', views.hosts, name='hosts'),
path('host/<int:host_id>/', views.host, name='host'),
path('host/<int:host_id>/edit/', views.host_edit, name='host_edit'),
path('host/add/', views.host_add, name='host_add'),
path('users/', views.users, name='users'),
path('user/<int:user_id>/', views.user, name='user'),
path('user/<int:user_id>/edit/', views.user_edit, name='user_edit'),
path('user/add/', views.user_add, name='user_add'),
path('groups/', views.groups, name='groups'),
path('group/<int:group_id>/', views.group, name='group'),
path('group/<int:group_id>/edit/', views.group_edit, name='group_edit'),
path('group/add/', views.group_add, name='group_add'),
]
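# Usage sketch (added example, not part of the original file): because app_name = "server",
# these routes are namespaced; reversing them needs the "server:" prefix once this URLconf is
# included from the project URLconf, e.g. path("server/", include("server.urls")).
def _example_reverse():
    from django.urls import reverse
    return reverse("server:host", kwargs={"host_id": 1})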
|
py | b410ae1760ad4aaf6fcd51855b56eacd068797ce | import AFQ.api.bundle_dict as abd
from AFQ.tests.test_api import create_dummy_bids_path
from AFQ.api.group import GroupAFQ
import AFQ.data as afd
import pytest
def test_AFQ_custom_bundle_dict():
bids_path = create_dummy_bids_path(3, 1)
bundle_dict = abd.BundleDict()
GroupAFQ(
bids_path,
preproc_pipeline="synthetic",
bundle_info=bundle_dict)
def test_BundleDict():
"""
Tests bundle dict
"""
# test defaults
afq_bundles = abd.BundleDict()
# bundles restricted within hemisphere
# NOTE: FA and FP cross midline so are removed
# NOTE: all others generate two bundles
num_hemi_bundles = (len(abd.BUNDLES)-2)*2
# bundles that cross the midline
num_whole_bundles = 2
assert len(afq_bundles) == num_hemi_bundles + num_whole_bundles
# Arcuate Fasciculus
afq_bundles = abd.BundleDict(["ARC"])
assert len(afq_bundles) == 2
# Forceps Minor
afq_bundles = abd.BundleDict(["FA"])
assert len(afq_bundles) == 1
# Cingulum Hippocampus
# not included but exists in templates
afq_bundles = abd.BundleDict(["HCC"])
assert len(afq_bundles) == 2
# Test "custom" bundle
afq_templates = afd.read_templates()
afq_bundles = abd.BundleDict({
"custom_bundle": {
"ROIs": [afq_templates["FA_L"],
afq_templates["FP_R"]],
"rules": [True, True],
"cross_midline": False,
"uid": 1}})
afq_bundles.get("custom_bundle")
assert len(afq_bundles) == 1
# Vertical Occipital Fasciculus
# not included and does not exist in afq templates
with pytest.raises(
ValueError,
match="VOF_R is not in AFQ templates"):
afq_bundles = abd.BundleDict(["VOF"])
afq_bundles["VOF_R"]
afq_bundles = abd.BundleDict(["VOF"], seg_algo="reco80")
assert len(afq_bundles) == 2
afq_bundles = abd.BundleDict(["whole_brain"], seg_algo="reco80")
assert len(afq_bundles) == 1
|
py | b410aefb3055de356f0092d8145e93f000a0f56a | import os
from pyniel.python_tools.path_tools import make_dir_if_not_exists
from navrep.tools.commonargs import parse_common_args
from navrep.envs.e2eenv import E2E1DIANEnv
from navrep.scripts.cross_test_navreptrain_in_ianenv import run_test_episodes
from navrep.scripts.test_e2e import E2E1DCPolicy
if __name__ == '__main__':
args, _ = parse_common_args()
if args.n is None:
args.n = 1000
collect_trajectories = False
env = E2E1DIANEnv(silent=True, collect_trajectories=collect_trajectories)
policy = E2E1DCPolicy()
S = run_test_episodes(env, policy, render=args.render, num_episodes=args.n)
DIR = os.path.expanduser("~/navrep/eval/crosstest")
if args.dry_run:
DIR = "/tmp/navrep/eval/crosstest"
make_dir_if_not_exists(DIR)
if collect_trajectories:
NAME = "e2e1dnavreptrain_in_ianenv_{}.pckl".format(len(S))
PATH = os.path.join(DIR, NAME)
S.to_pickle(PATH)
else:
NAME = "e2e1dnavreptrain_in_ianenv_{}.csv".format(len(S))
PATH = os.path.join(DIR, NAME)
S.to_csv(PATH)
print("{} written.".format(PATH))
|