code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
from .baseorder import BaseOrder
from .helper import direction_from_amount, order_duration_spec
import saxo_openapi.definitions.orders as OD
from .mixin import OnFillHnd
class StopOrder(BaseOrder, OnFillHnd):
"""create a StopOrder.
StopOrder is used to build the body for a StopOrder. The body can be
used to pass to the Order endpoint.
"""
# allowed OrderDurationTypes:
ALLOWED_DT = [OD.OrderDurationType.DayOrder,
OD.OrderDurationType.GoodTillDate,
OD.OrderDurationType.GoodTillCancel]
def __init__(self,
Uic,
Amount,
AssetType,
OrderPrice,
ManualOrder=False,
AmountType=OD.AmountType.Quantity,
TakeProfitOnFill=None,
StopLossOnFill=None,
TrailingStopLossOnFill=None,
OrderDurationType=OD.OrderDurationType.DayOrder,
GTDDate=None):
"""
Instantiate a StopOrder.
Parameters
----------
Uic: int (required)
the Uic of the instrument to trade
Amount: decimal (required)
the number of lots/shares/contracts or a monetary value
if amountType is set to CashAmount
OrderPrice: decimal (required)
the price indicating the limitprice
AssetType: string (required)
the assettype for the Uic
ManualOrder: bool (required)
flag to identify if an order is from an automated origin,
default: False
AmountType: AmountType (optional)
the amountType, defaults to Quantity, see AmountType for
other options
TakeProfitOnFill: TakeProfitDetails instance or dict
the take-profit order specification
StopLosstOnFill: StopLossDetails instance or dict
the stoploss order specification
TrailingStopLosstOnFill: TrailingStopLossDetails instance or dict
the Trailingstoploss order specification
OrderDurationType: string, default DayOrder
the order duration type, check SAXO Bank specs. for details
GTDDate: datetime string (required if order duration is GoodTillDate)
the GTD-datetime
Example
-------
>>> import json
>>> from saxo_openapi import API
>>> import saxo_openapi.endpoints.trading as tr
>>> from saxo_openapi.contrib.orders import StopOrder
>>>
>>> so = StopOrder(Uic=21,
... AssetType=OD.AssetType.FxSpot,
... Amount=10000,
... OrderPrice=1.1025)
>>> print(json.dumps(so.data, indent=2))
{
"Uic": 21,
"AssetType": "FxSpot",
"Amount": 10000,
"Price": 1.1025,
"BuySell": "Buy",
"OrderType": "Stop",
"ManualOrder": false,
"AmountType": "Quantity",
"OrderDuration": {
"DurationType": "DayOrder"
}
}
>>> # now we have the order specification, create the order request
>>> r = tr.orders.Order(data=so.data)
>>> # perform the request
>>> rv = client.request(r)
>>> print(rv)
>>> print(json.dumps(rv, indent=4))
{
"OrderId": "76697286"
}
"""
super(StopOrder, self).__init__()
# by default for a StopOrder
da = {
'OrderType': OD.OrderType.Stop,
'AmountType': AmountType,
}
da.update({'OrderDuration': order_duration_spec(OrderDurationType,
self.ALLOWED_DT,
GTDDate)})
# required
self._data.update({"Uic": Uic})
self._data.update({"AssetType": AssetType})
self._data.update({"Amount": abs(Amount)})
self._data.update({"ManualOrder": ManualOrder})
self._data.update({"OrderPrice": OrderPrice})
self._data.update({"BuySell": direction_from_amount(Amount)})
self._data.update(da)
self.hndOnFill(
TakeProfitOnFill=TakeProfitOnFill,
StopLossOnFill=StopLossOnFill,
TrailingStopLossOnFill=TrailingStopLossOnFill)
@property
def data(self):
"""data property.
return the JSON body.
"""
return super(StopOrder, self).data
class StopOrderFxSpot(StopOrder):
"""StopOrderFxSpot - StopOrder for FxSpot only.
The StopOrderFxSpot lacks the AssetType parameter and only serves
the AssetType FxSpot.
"""
def __init__(self,
Uic,
Amount,
OrderPrice,
ManualOrder=False,
AmountType=OD.AmountType.Quantity,
TakeProfitOnFill=None,
StopLossOnFill=None,
TrailingStopLossOnFill=None,
OrderDurationType=OD.OrderDurationType.DayOrder,
GTDDate=None):
"""
Instantiate a StopOrderFxSpot.
Parameters
----------
Uic: int (required)
the Uic of the instrument to trade
Amount: decimal (required)
the number of lots/shares/contracts or a monetary value
if amountType is set to CashAmount
OrderPrice: decimal (required)
the price indicating the limitprice
ManualOrder: bool (required)
flag to identify if an order is from an automated origin,
default: False
AmountType: AmountType (optional)
the amountType, defaults to Quantity, see AmountType for
other options
TakeProfitOnFill: TakeProfitDetails instance or dict
the take-profit order specification
StopLosstOnFill: StopLossDetails instance or dict
the stoploss order specification
TrailingStopLosstOnFill: TrailingStopLossDetails instance or dict
the Trailingstoploss order specification
OrderDurationType: string, default DayOrder
the order duration type, check SAXO Bank specs. for details
GTDDate: datetime string (required if order duration is GoodTillDate)
the GTD-datetime
Example
-------
>>> from saxo_openapi import API
>>> from saxo_openapi.contrib.orders import (
... tie_account_to_order,
... StopOrderFxSpot)
>>> token = "..."
>>> client = API(access_token=token)
>>> order = tie_account_to_order(
... AccountKey,
... StopOrderFxSpot(Uic=21, Amount=25000, OrderPrice=1.1025))
>>> r = tr.orders.Order(data=order)
>>> rv = client.request(r)
>>> print(json.dumps(rv, indent=2))
{
"OrderId": "76703544"
}
"""
super(StopOrderFxSpot, self).__init__(
Uic=Uic,
Amount=Amount,
OrderPrice=OrderPrice,
ManualOrder=ManualOrder,
AmountType=AmountType,
AssetType=OD.AssetType.FxSpot,
OrderDurationType=OrderDurationType,
TakeProfitOnFill=TakeProfitOnFill,
StopLossOnFill=StopLossOnFill,
TrailingStopLossOnFill=TrailingStopLossOnFill,
GTDDate=GTDDate)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/contrib/orders/stoporder.py
| 0.869687 | 0.284954 |
stoporder.py
|
pypi
|
def dyndoc_insert(src):
"""docstring_insert - a decorator to insert API-docparts dynamically."""
# manipulating docstrings this way is tricky due to indentation
# the JSON needs leading whitespace to be interpreted correctly
import json
import re
def mkblock(d, flag=0):
# response, pretty formatted
v = json.dumps(d, indent=2)
if flag == 1:
# strip the '[' and ']' in case of a list holding items
# that stand on their own (example: tick records from a stream)
nw = re.findall('.*?[(.*)]', v, flags=re.S)
v = nw[0]
# add leading whitespace for each line and start with a newline
return "\n{}".format("".join(["{0:>16}{1}\n".format("", L)
for L in v.split('\n')]))
def dec(obj):
allSlots = re.findall("{(_v3.*?)}", obj.__doc__)
docsub = {}
sub = {}
for k in allSlots:
p = re.findall("^(_v3.*)_(.*)", k)
p = list(*p)
sub.update({p[1]: p[0]})
for v in sub.values():
for k in sub.keys():
docsub["{}_url".format(v)] = "{}".format(src[v]["url"])
if "resp" == k:
docsub.update({"{}_resp".format(v):
mkblock(src[v]["response"])})
if "body" == k:
docsub.update({"{}_body".format(v):
mkblock(src[v]["body"])})
if "params" == k:
docsub.update({"{}_params".format(v):
mkblock(src[v]["params"])})
if "route" == k:
docsub.update({"{}_route".format(v):
mkblock(src[v]["route"])})
if "ciresp" == k:
docsub.update({"{}_ciresp".format(v):
mkblock(src[v]["response"], 1)})
obj.__doc__ = obj.__doc__.format(**docsub)
return obj
return dec
def endpoint(url, method="GET", expected_status=200):
"""endpoint - decorator to manipulate the REST-service endpoint.
The endpoint decorator sets the endpoint and the method for the class
to access the REST-service.
"""
def dec(obj):
obj.ENDPOINT = url
obj.METHOD = method
obj.EXPECTED_STATUS = expected_status
return obj
return dec
def abstractclass(cls):
"""abstractclass - class decorator.
make sure the class is abstract and cannot be used on it's own.
@abstractclass
class A(object):
def __init__(self, *args, **kwargs):
# logic
pass
class B(A):
pass
a = A() # results in an AssertionError
b = B() # works fine
"""
setattr(cls, "_ISNEVER", cls.__bases__[0].__name__)
origInit = cls.__dict__["__init__"]
def wrapInit(self, *args, **kwargs):
# when the class is instantiated we can check for bases
# we don't want it to be the base class
if self.__class__.__bases__[-1].__name__ != self._ISNEVER:
origInit(self, *args, **kwargs)
else:
raise TypeError("Use of abstract base class")
# replace the original __init__
setattr(wrapInit, "__doc__", getattr(origInit, "__doc__"))
setattr(origInit, "__doc__", "")
setattr(cls, "__init__", wrapInit)
return cls
class extendargs(object):
"""'extendargs' decorator.
Add extra arguments to the argumentlist of the constructor of the class.
"""
def __init__(self, *loa):
self.loa = loa
def __call__(self, cls):
# save parent class __init__
origInit = cls.__bases__[0].__dict__["__init__"]
def wrapInit(wself, *args, **kwargs):
for extraArg in self.loa:
if extraArg in kwargs:
setattr(wself, extraArg, kwargs[extraArg])
del kwargs[extraArg]
origInit(wself, *args, **kwargs)
setattr(cls, "__init__", wrapInit)
return cls
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/decorators.py
| 0.689933 | 0.281356 |
decorators.py
|
pypi
|
from .base import Charts
from ..decorators import dyndoc_insert, endpoint
from .responses.charts import responses
@endpoint("openapi/chart/v1/charts")
class GetChartData(Charts):
"""Return chart data as specified by request parameters."""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate a GetChartData request.
Parameters
----------
params : dict (required)
dict representing the request parameters.
Required in params: AssetType, Horizon and Uic
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.chart as chart
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_GetChartData_params}
>>> r = chart.charts.GetChartData(params=params)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_GetChartData_resp}
"""
super(GetChartData, self).__init__()
self.params = params
@endpoint("openapi/chart/v1/charts/subscriptions", "POST", 201)
class CreateChartDataSubscription(Charts):
"""Sets up a subscription and returns an initial snapshot of most recently
completed samples specified by the parameters in the request.
Subsequent samples are delivered over the streaming channel. Most often
a single new sample or sample update is delivered at a time, but when a
sample closes, you will typcially get two samples: The now closed bar, and
the bar just opening.
"""
HEADERS = {"Content-Type": "application/json"}
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a CreateChartDataSubscription request.
Parameters
----------
data : dict (required)
dict representing the data body, in this case an order spec.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.chart as ch
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_CreateChartDataSubscription_body}
>>> r = ch.charts.CreateChartDataSubscription(data=data)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_CreateChartDataSubscription_resp}
"""
super(CreateChartDataSubscription, self).__init__()
self.data = data
@endpoint("openapi/chart/v1/charts/subscriptions/{ContextId}", "DELETE", 202)
class ChartDataRemoveSubscriptions(Charts):
"""Removes all subscriptions for the current session on this resource, and
frees all resources on the server.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, params=None):
"""Instantiate a ChartDataRemoveSubscriptions request.
Parameters
----------
ContextId: string (required)
the ContextId
params: dict (optional)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.chart as ch
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_ChartDataRemoveSubscriptions_params}
>>> ContextId = ...
>>> r = ch.charts.ChartDataRemoveSubscriptions(ContextId,
... params=params)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(ChartDataRemoveSubscriptions, self).__init__(ContextId=ContextId)
self.params = params
@endpoint("openapi/chart/v1/charts/subscriptions/{ContextId}/{ReferenceId}",
"DELETE", 202)
class ChartDataRemoveSubscription(Charts):
"""Removes subscriptions for the given reference id on this resource, and
frees resources on the server.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate a ChartDataRemoveSubscription request.
Parameters
----------
ContextId: string (required)
the context id
ReferenceId: string (required)
the reference id
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.chart as ch
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ContextId = ...
>>> ReferenceId = ...
>>> r = ch.charts.ChartDataRemoveSubscription(ContextId, ReferenceId)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(ChartDataRemoveSubscription, self).__init__(
ContextId=ContextId,
ReferenceId=ReferenceId)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/chart/charts.py
| 0.754101 | 0.206514 |
charts.py
|
pypi
|
responses = {
"_v3_AccountPerformance": {
"url": "/openapi/hist/v3/perf/{ClientKey}",
"params": {
'FromDate': '2019-03-01',
'ToDate': '2019-03-10'
},
"response": {
"AccountSummary": {
"AverageTradeDurationInMinutes": 34260,
"AverageTradesPerWeek": 48.2325728770595,
"NumberOfDaysTraded": 1589,
"NumberOfLongTrades": 5434,
"NumberOfShortTrades": 5263,
"TopTradedInstruments": [
"DAX.I",
"DJI.I",
"EURUSD",
"GBPUSD",
"NAS100.I",
"NOKSEK",
"EURNOK",
"SP500.I"
],
"TotalReturnFraction": -0.9999963956455,
"TradedInstruments": [
"FxSpot",
"Stock",
"FxVanillaOption",
"ContractFutures"
],
"TradesTotalCount": 10697,
"TradesWonCount": 6499,
"WinFraction": 0.61
},
"Allocation": {
"TradesPerAssetType": {
"ClosedTradesAllocations": [
{
"AssetClassType": "Equity",
"ReturnAttribution": 1,
"TradeCount": 168,
"TradePercent": 0.38009049773755654
},
{
"AssetClassType": "Currency",
"ReturnAttribution": 0.249937016456527,
"TradeCount": 112,
"TradePercent": 0.25339366515837103
},
{
"AssetClassType": "Commodity",
"ReturnAttribution": 0.5628789450009563,
"TradeCount": 105,
"TradePercent": 0.23755656108597284
},
{
"AssetClassType": "Fixed Income",
"ReturnAttribution": -0.013150856136249162,
"TradeCount": 57,
"TradePercent": 0.12895927601809956
}
]
},
"TradesPerInstrument": {
"ClosedTradesAllocations": [
{
"AssetType": "ContractFutures",
"InstrumentDescription": "30-Year U.S. "
"Treasury Bond - Sep 2016",
"InstrumentSymbol": "ZBU6",
"InstrumentUic": 3626018,
"ReturnAttribution": -0.15987101005684304,
"TradeCount": 40,
"TradePercent": 0.09049773755656108
},
{
"AssetType": "FxSpot",
"InstrumentDescription": "British Pound/US Dollar",
"InstrumentSymbol": "GBPUSD",
"InstrumentUic": 31,
"ReturnAttribution": 0.14685155225185834,
"TradeCount": 37,
"TradePercent": 0.083710407239819
}
]
}
},
"BalancePerformance": {
"AccountBalanceTimeSeries": [
{
"Date": "2016-03-28",
"Value": 0
},
{
"Date": "2016-03-29",
"Value": 0
}
],
"AccountValueTimeSeries": [
{
"Date": "2016-03-28",
"Value": 0
},
{
"Date": "2016-03-29",
"Value": 0
}
],
"MonthlyProfitLossTimeSeries": [
{
"Date": "2015-11-30",
"Value": 0
},
{
"Date": "2015-12-31",
"Value": 0
}
],
"SecurityTransferTimeSeries": [
{
"Date": "2016-03-28",
"Value": 0
},
{
"Date": "2016-03-29",
"Value": 0
}
],
"YearlyProfitLossTimeSeries": [
{
"Date": "2015-12-31",
"Value": 0
},
{
"Date": "2016-12-31",
"Value": 0
},
{
"Date": "2017-12-31",
"Value": 0
}
]
},
"From": "2016-03-28",
"InceptionDay": "2015-11-24",
"LastTradeDay": "2017-03-27",
"Thru": "2017-03-27",
"TimeWeightedPerformance": {
"AccumulatedTimeWeightedTimeSeries": [
{
"Date": "2016-03-25",
"Value": 0
}
],
"MonthlyReturnTimeSeries": [
{
"Date": "2016-03-25",
"Value": 0
}
],
"PerformanceFraction": -1,
"PerformanceKeyFigures": {
"ClosedTradesCount": 0,
"DrawdownReport": {
"Drawdowns": [
{
"DaysCount": 3,
"DepthInPercent": 1,
"FromDate": "2016-08-05",
"ThruDate": "2016-08-08"
}
],
"MaxDaysInDrawdownFromTop10Drawdowns": 3
},
"LosingDaysFraction": 0.03,
"MaxDrawDownFraction": 1,
"ReturnFraction": -1,
"SampledStandardDeviation": 0.0618018874919214,
"SharpeRatio": -0.952069751000777,
"SortinoRatio": -0.0591710418985739
},
"YearlyReturnTimeSeries": [
{
"Date": "2016-03-25",
"Value": 0
}
]
},
"TotalCashBalance": 20226.02,
"TotalCashBalancePerCurrency": [
{
"StringValue": "CAD",
"Value": -491.707122366824
}
],
"TotalOpenPositionsValue": 29571.057,
"TotalPositionsValuePerCurrency": [
{
"StringValue": "CAD",
"Value": -491.707122366824
}
],
"TotalPositionsValuePerProductPerSecurity": [
{
"Description": "Abengoa SA - Warrants",
"ProductName": "Shares",
"Symbol": "LOCK - 1496:xxxx",
"Value": 0
}
]
}
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/accounthistory/responses/performance.py
| 0.611266 | 0.337913 |
performance.py
|
pypi
|
responses = {
"_v3_HistoricalPositions": {
"url": "/openapi/hist/v3/positions/{ClientKey}",
"params": {
'FromDate': '2019-03-01',
'ToDate': '2019-03-10'
},
"response": {
"Data": [
{
"AccountId": "112209INET",
"AccountValueEndOfDay": {
"AccountBalance": 7526.17183,
"CashTransfers": 0,
"Date": "2016-07-19",
"PositionsValue": -978.29753,
"SecurityTransfers": 0,
"TotalValue": 6547.8743
},
"Amount": -1,
"AmountAccountValueCloseRatio": "2:1",
"AmountAccountValueOpenRatio": "2:1",
"ClosingAssetType": "CfdOnIndex",
"ClosingTradeDate": "2016-07-19",
"ClosingValueDate": "2016-07-19",
"CopiedFrom": "1",
"CorrelationType": "None",
"Decimals": 2,
"ExecutionTimeClose": "2016-07-19T07:25:37.000000Z",
"ExecutionTimeOpen": "2016-07-18T10:38:06.000000Z",
"FigureValue": 1,
"InstrumentCcyToAccountCcyRateClose": 1.1020982542939,
"InstrumentCcyToAccountCcyRateOpen": 1.11308229426434,
"InstrumentSymbol": "GER30.I",
"LongShort": {
"PresentationValue": "Short",
"Value": "Short"
},
"OpeningAssetType": "CfdOnIndex",
"OpeningTradeDate": "2016-07-18",
"OpeningValueDate": "2016-07-18",
"PriceClose": 9998,
"PriceGain": 0.004778021102926538,
"PriceOpen": 10046,
"PricePct": -0.4778021102926538,
"ProfitLoss": 52.87,
"ProfitLossAccountValueFraction": 0.00807437613761156,
"Uic": "1373",
"ValueInAccountCurrencyClose": -11018.778346430412,
"ValueInAccountCurrencyOpen": -11182.02472817956
}
]
}
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/accounthistory/responses/historicalpositions.py
| 0.503418 | 0.303532 |
historicalpositions.py
|
pypi
|
from .base import ValueAdd
from ..decorators import dyndoc_insert, endpoint
from .responses.pricealerts import responses
@endpoint("openapi/vas/v1/pricealerts/definitions/")
class GetAllAlerts(ValueAdd):
"""Get an unsorted list of all the price alert definitions belonging to
the current user where the state matches the specified value. The alerts
might belong to different accounts, should the user have more than one.
"""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate a GetAllAlerts request.
Parameters
----------
data : dict (required)
dict representing the querystring parameters.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.valueadd as va
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_GetAllAlerts_params}
>>> r = va.pricealerts.GetAllAlerts(params=params)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_GetAllAlerts_resp}
"""
super(GetAllAlerts, self).__init__()
self.params = params
@endpoint("openapi/vas/v1/pricealerts/definitions/{AlertDefinitionId}")
class GetAlertDefinition(ValueAdd):
"""Gets the specified price alert for the current user."""
@dyndoc_insert(responses)
def __init__(self, AlertDefinitionId):
"""Instantiate a GetAlertDefinition request.
Parameters
----------
AlertDefinitionId: string (required)
the alert definition id.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.valueadd as va
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> AlertDefinitionId = 30384
>>> r = va.pricealerts.GetAlertDefinition(AlertDefinitionId)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_GetAlertDefinition_resp}
"""
super(GetAlertDefinition, self).__init__(
AlertDefinitionId=AlertDefinitionId)
@endpoint("openapi/vas/v1/pricealerts/definitions/", "POST", 201)
class CreatePriceAlert(ValueAdd):
"""Create a new price alert definition. The created definition is
returned with a couple of more properties, the price alert definition
ID being one of them.
"""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a CreatePriceAlert request.
Parameters
----------
data: dict (required)
dict representing the body parameters.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.valueadd as va
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_CreatePriceAlert_body}
>>> r = va.pricealerts.CreatePriceAlert(AlertDefinitionId)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_CreatePriceAlert_resp}
"""
super(CreatePriceAlert, self).__init__()
self.data = data
@endpoint("openapi/vas/v1/pricealerts/definitions/{AlertDefinitionId}",
"PUT", 204)
class UpdatePriceAlert(ValueAdd):
"""Update a price alert definition for the current user."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, AlertDefinitionId, data):
"""Instantiate an UpdatePriceAlert request.
Parameters
----------
data: dict (required)
dict representing the body parameters.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.valueadd as va
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> AlertDefinitionId = 30384
>>> data = {_v3_UpdatePriceAlert_body}
>>> r = va.pricealerts.UpdatePriceAlert(AlertDefinitionId, data=data)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No response data is returned.
"""
super(UpdatePriceAlert, self).__init__(
AlertDefinitionId=AlertDefinitionId)
self.data = data
@endpoint("openapi/vas/v1/pricealerts/definitions/{AlertDefinitionIds}",
"DELETE", 204)
class DeletePriceAlert(ValueAdd):
"""Delete the specified price alert definitions. The alerts have to
belong to the current user.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, AlertDefinitionIds):
"""Instantiate a DeletePriceAlert request.
Parameters
----------
AlertDefinitionIds: string (required)
string with ','-delimited AlertDefinitionIds
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.valueadd as va
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> AlertDefinitionIds = '30384,30386'
>>> r = va.pricealerts.DeletePriceAlert(AlertDefinitionIds)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No response data is returned.
"""
super(DeletePriceAlert, self).__init__(
AlertDefinitionIds=AlertDefinitionIds)
@endpoint("openapi/vas/v1/pricealerts/usersettings/")
class GetUserNotificationSettings(ValueAdd):
"""Get the current user's price alert notification settings."""
@dyndoc_insert(responses)
def __init__(self):
"""Instantiate a GetUserNotificationSettings request.
Parameters
----------
None
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.valueadd as va
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> r = va.pricealerts.GetUserNotificationSettings()
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_GetUserNotificationSettings_resp}
"""
super(GetUserNotificationSettings, self).__init__()
@endpoint("openapi/vas/v1/pricealerts/usersettings/", "PUT", 204)
class ModifyUserNotificationSettings(ValueAdd):
"""Modify the current user's price alert notification settings."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a ModifyUserNotificationSettings request.
Parameters
----------
data: dict (required)
dict representing the body parameters.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.valueadd as va
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_ModifyUserNotificationSettings_body}
>>> r = va.pricealerts.ModifyUserNotificationSettings(data=data)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No response data is returned.
"""
super(ModifyUserNotificationSettings, self).__init__()
self.data = data
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/valueadd/pricealerts.py
| 0.657538 | 0.183447 |
pricealerts.py
|
pypi
|
from ..decorators import dyndoc_insert, endpoint
from .base import ReferenceData
from .responses.instruments import responses
@endpoint("openapi/ref/v1/instruments/")
class Instruments(ReferenceData):
"""Get a list of summary information for all instruments and options
on the Saxo Trading platform restricted by the access rights of the user.
"""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate an Instruments request.
Parameters
----------
params: dict (required)
dict reppresenting the querystring parameters
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_Instruments_params}
>>> r = rd.instruments.Instruments(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_Instruments_resp}
"""
super(Instruments, self).__init__()
self.params = params
@endpoint("openapi/ref/v1/instruments/details")
class InstrumentsDetails(ReferenceData):
"""Get detailed information on a list of instruments."""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate an InstrumentsDetails request.
Parameters
----------
params: dict (required)
dict reppresenting the querystring parameters
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_InstrumentsDetails_params}
>>> r = rd.instruments.InstrumentsDetails(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_InstrumentsDetails_resp}
"""
super(InstrumentsDetails, self).__init__()
self.params = params
@endpoint("openapi/ref/v1/instruments/details/{Uic}/{AssetType}")
class InstrumentDetails(ReferenceData):
"""Get detailed information for a specific instrument."""
@dyndoc_insert(responses)
def __init__(self, Uic, AssetType, params=None):
"""Instantiate an InstrumentDetails request.
Parameters
----------
Uic: int (required)
the Uic of the instrument
AssetType: string (required)
the AssetType specification
params: dict (optional)
dict representing querystring parameters
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> Uic = 22
>>> AssetType = 'FxSpot'
>>> params = {_v3_InstrumentDetails_params}
>>> r = rd.instruments.InstrumentDetails(Uic=Uic,
... AssetType=AssetType,
... params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_InstrumentDetails_resp}
"""
super(InstrumentDetails, self).__init__(Uic=Uic,
AssetType=AssetType)
self.params = params
@endpoint("openapi/ref/v1/instruments/contractoptionspaces/{OptionRootId}")
class ContractoptionSpaces(ReferenceData):
"""Get contractoption data."""
@dyndoc_insert(responses)
def __init__(self, OptionRootId, params=None):
"""Instantiate a ContractoptionSpaces request.
Parameters
----------
OptionRootId: string (required)
the OptionRootId
params: dict (optional)
dict representing querystring parameters
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> OptionRootId = 231
>>> params = {_v3_ContractoptionSpaces_params}
>>> r = rd.instruments.ContractoptionSpaces(
... OptionRootId=OptionRootId,
... params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_ContractoptionSpaces_resp}
"""
super(ContractoptionSpaces, self).__init__(
OptionRootId=OptionRootId)
self.params = params
@endpoint("openapi/ref/v1/instruments/futuresspaces/{ContinuousFuturesUic}")
class FuturesSpaces(ReferenceData):
"""Get futures spaces data."""
# @dyndoc_insert(responses)
def __init__(self, ContinuousFuturesUic):
"""Instantiate a ContractoptionSpaces request.
Parameters
----------
ContinuousFuturesUic: string (required)
the ContinuousFuturesUic
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> ContinuousFuturesUic = '...'
>>> r = rd.instruments.FuturesSpaces(
... ContinuousFuturesUic=ContinuousFuturesUic)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_FuturesSpaces_resp}
"""
super(FuturesSpaces, self).__init__(
ContinuousFuturesUic=ContinuousFuturesUic)
@endpoint("openapi/ref/v1/instruments/tradingschedule/{Uic}/{AssetType}")
class TradingSchedule(ReferenceData):
"""Get TradingSchedule data."""
# @dyndoc_insert(responses)
def __init__(self, Uic, AssetType):
"""Instantiate a TradingSchedule request.
Parameters
----------
Uic: string (required)
the Uic of the instrument
AssetType: string (required)
the AssetType of the instrument
For one Uic multiple assettypes can be available trading
on different times.
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> Uic = 21
>>> AssetType = "FxSpot"
>>> r = rd.instruments.ContractoptionSpaces(
... Uic=Uic,
... AssetType=AssetType)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_TradingSchedule_resp}
"""
super(TradingSchedule, self).__init__(
Uic=Uic,
AssetType=AssetType)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/instruments.py
| 0.67822 | 0.160365 |
instruments.py
|
pypi
|
from ..decorators import dyndoc_insert, endpoint
from .base import ReferenceData
from .responses.standarddates import responses
@endpoint("openapi/ref/v1/standarddates/forwardtenor/{Uic}")
class ForwardTenorDates(ReferenceData):
"""Get a list of forward tenor dates for an UIC."""
@dyndoc_insert(responses)
def __init__(self, Uic, params=None):
"""Instantiate a ForwardTenorDates request.
Parameters
----------
Uic: int (required)
the Uic code of the instrument
params: dict (required)
dict with parameters representing the querystring
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_ForwardTenorDates_params}
>>> Uic = 22
>>> r = rd.ForwardTenorDates(Uic=Uic, params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_ForwardTenorDates_resp}
"""
super(ForwardTenorDates, self).__init__(Uic=Uic)
self.params = params
@endpoint("openapi/ref/v1/standarddates/fxoptionexpiry/{Uic}")
class FXOptionExpiryDates(ReferenceData):
"""Get a list of FX option expiry dates for an UIC."""
@dyndoc_insert(responses)
def __init__(self, Uic):
"""Instantiate a FXOptionExpiryDates request.
Parameters
----------
Uic: int (required)
the Uic code of the instrument
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> Uic = 22
>>> r = rd.FXOptionExpiryDates(Uic=Uic)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=2))
Output::
{_v3_FXOptionExpiryDates_resp}
"""
super(FXOptionExpiryDates, self).__init__(Uic=Uic)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/standarddates.py
| 0.760384 | 0.192805 |
standarddates.py
|
pypi
|
from ..decorators import dyndoc_insert, endpoint
from .base import ReferenceData
from .responses.algostrategies import responses
@endpoint("openapi/ref/v1/algostrategies/")
class AlgoStrategies(ReferenceData):
"""Retrieve a list of strategies with detailed information about each
strategy. The response also contains links to other relevant data, such
as their parameters.
"""
@dyndoc_insert(responses)
def __init__(self, params=None):
"""Instantiate an AlgoStrategies request.
Parameters
----------
params: dict (required)
dict representing the querystring parameters
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_AlgoStrategies_params}
>>> r = rd.algostrategies.AlgoStrategies(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_AlgoStrategies_resp}
"""
super(AlgoStrategies, self).__init__()
self.params = params
@endpoint("openapi/ref/v1/algostrategies/{Name}")
class AlgoStrategyDetails(ReferenceData):
"""Retrieve detailed information about a specific Strategy."""
@dyndoc_insert(responses)
def __init__(self, Name):
"""Instantiate an AlgoStrategyDetails request.
Parameters
----------
Name: string (required)
Name of the strategy
>>> import json
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.referencedata as rd
>>> client = saxo_openapi.API(access_token=...)
>>> Name = "Implementation Shortfall"
>>> r = rd.algostrategies.AlgoStrategyDetails(Name=Name)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_AlgoStrategyDetails_resp}
"""
super(AlgoStrategyDetails, self).__init__(Name=Name)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/algostrategies.py
| 0.672332 | 0.192767 |
algostrategies.py
|
pypi
|
responses = {
"_v3_ExchangeList": {
"url": "/openapi/ref/v1/exchanges",
"response": {
"__count": 181,
"Data": [
{
"AllDay": False,
"CountryCode": "US",
"Currency": "USD",
"ExchangeId": "NYSE_ARCA",
"ExchangeSessions": [
{
"EndTime": "2019-03-04T12:00:00.000000Z",
"StartTime": "2019-03-01T21:00:00.000000Z",
"State": "Closed"
},
{
"EndTime": "2019-03-04T14:30:00.000000Z",
"StartTime": "2019-03-04T12:00:00.000000Z",
"State": "PreTrading"
},
{
"EndTime": "2019-03-04T21:00:00.000000Z",
"StartTime": "2019-03-04T14:30:00.000000Z",
"State": "AutomatedTrading"
},
{
"EndTime": "2019-03-05T12:00:00.000000Z",
"StartTime": "2019-03-04T21:00:00.000000Z",
"State": "Closed"
}
],
"Mic": "ARCX",
"Name": "New York Stock Exchange (ARCA)",
"TimeZone": 3,
"TimeZoneAbbreviation": "EST",
"TimeZoneOffset": "-05:00:00"
},
{
"AllDay": False,
"CountryCode": "SG",
"Currency": "SGD",
"ExchangeId": "SGX-DT",
"ExchangeSessions": [
{
"EndTime": "2019-03-03T23:43:00.000000Z",
"StartTime": "2019-03-01T11:05:00.000000Z",
"State": "Closed"
},
{
"EndTime": "2019-03-04T11:05:00.000000Z",
"StartTime": "2019-03-03T23:43:00.000000Z",
"State": "AutomatedTrading"
},
{
"EndTime": "2019-03-04T23:43:00.000000Z",
"StartTime": "2019-03-04T11:05:00.000000Z",
"State": "Closed"
},
{
"EndTime": "2019-03-05T11:05:00.000000Z",
"StartTime": "2019-03-04T23:43:00.000000Z",
"State": "AutomatedTrading"
}
],
"Mic": "XSES",
"Name": "Singapore Exchange Derivatives Trading Ltd.",
"TimeZone": 2,
"TimeZoneAbbreviation": "SGT",
"TimeZoneOffset": "08:00:00"
},
{
"AllDay": False,
"CountryCode": "CH",
"Currency": "CHF",
"ExchangeId": "SWX_ETF",
"ExchangeSessions": [
{
"EndTime": "2019-03-04T05:00:00.000000Z",
"StartTime": "2019-03-01T16:35:00.000000Z",
"State": "Closed"
},
{
"EndTime": "2019-03-04T08:00:00.000000Z",
"StartTime": "2019-03-04T05:00:00.000000Z",
"State": "PreTrading"
},
{
"EndTime": "2019-03-04T16:30:00.000000Z",
"StartTime": "2019-03-04T08:00:00.000000Z",
"State": "AutomatedTrading"
},
{
"EndTime": "2019-03-04T16:35:00.000000Z",
"StartTime": "2019-03-04T16:30:00.000000Z",
"State": "CallAuctionTrading"
},
{
"EndTime": "2019-03-05T05:00:00.000000Z",
"StartTime": "2019-03-04T16:35:00.000000Z",
"State": "Closed"
}
],
"Mic": "XSWX",
"Name": "SIX Swiss Exchange (ETFs)",
"TimeZone": 4,
"TimeZoneAbbreviation": "CET",
"TimeZoneOffset": "01:00:00"
}]
}
},
"_v3_ExchangeDetails": {
"url": "/openapi/ref/v1/exchanges/{ExchangeId}",
"response": {
"AllDay": False,
"CountryCode": "US",
"Currency": "USD",
"ExchangeId": "NYSE_ARCA",
"ExchangeSessions": [
{
"EndTime": "2019-03-04T12:00:00.000000Z",
"StartTime": "2019-03-01T21:00:00.000000Z",
"State": "Closed"
},
{
"EndTime": "2019-03-04T14:30:00.000000Z",
"StartTime": "2019-03-04T12:00:00.000000Z",
"State": "PreTrading"
},
{
"EndTime": "2019-03-04T21:00:00.000000Z",
"StartTime": "2019-03-04T14:30:00.000000Z",
"State": "AutomatedTrading"
},
{
"EndTime": "2019-03-05T12:00:00.000000Z",
"StartTime": "2019-03-04T21:00:00.000000Z",
"State": "Closed"
}
],
"Mic": "ARCX",
"Name": "New York Stock Exchange (ARCA)",
"TimeZone": 3,
"TimeZoneAbbreviation": "EST",
"TimeZoneOffset": "-05:00:00"
}
}
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/responses/exchanges.py
| 0.519521 | 0.356559 |
exchanges.py
|
pypi
|
responses = {
"_v3_Instruments": {
"url": "/openapi/ref/v1/instruments",
"params": {'AccountKey': 'Cf4xZWiYL6W1nMKpygBLLA==',
'Uics': '12,22,23'},
"response": {
"Data": [
{
"AssetType": "FxSpot",
"CurrencyCode": "AUD",
"Description": "Euro/Australian Dollar",
"ExchangeId": "SBFX",
"GroupId": 28784,
"Identifier": 12,
"SummaryType": "Instrument",
"Symbol": "EURAUD",
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
]
},
{
"AssetType": "FxSpot",
"CurrencyCode": "AUD",
"Description": "British Pound/Australian Dollar",
"ExchangeId": "SBFX",
"GroupId": 28867,
"Identifier": 22,
"SummaryType": "Instrument",
"Symbol": "GBPAUD",
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
]
},
{
"AssetType": "FxSpot",
"CurrencyCode": "CAD",
"Description": "British Pound/Canadian Dollar",
"ExchangeId": "SBFX",
"GroupId": 28871,
"Identifier": 23,
"SummaryType": "Instrument",
"Symbol": "GBPCAD",
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
]
}
]
}
},
"_v3_InstrumentsDetails": {
"url": "/openapi/ref/v1/instruments/details",
"params": {'AccountKey': 'Cf4xZWiYL6W1nMKpygBLLA==', 'Uics': '23'},
"response": {
"__count": 6,
"Data": [
{
"AmountDecimals": 6,
"AssetType": "FxSwap",
"CurrencyCode": "CAD",
"DefaultAmount": 100000,
"DefaultSlippage": 0.01,
"DefaultSlippageType": "Percentage",
"Description": "British Pound/Canadian Dollar",
"Exchange": {
"CountryCode": "DK",
"ExchangeId": "SBFX",
"Name": "Inter Bank"
},
"Format": {
"Decimals": 4,
"Format": "AllowTwoDecimalPips",
"OrderDecimals": 4
},
"FxForwardMaxForwardDate": "2020-03-10T00:00:00.000000Z",
"FxForwardMinForwardDate": "2019-03-06T00:00:00.000000Z",
"FxSpotDate": "2019-03-06T00:00:00.000000Z",
"GroupId": 0,
"IncrementSize": 5000,
"IsTradable": True,
"MinimumTradeSize": 1000,
"OrderDistances": {
"EntryDefaultDistance": 0.5,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 50,
"StopLossDefaultDistanceType": "Pips",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 50,
"TakeProfitDefaultDistanceType": "Pips",
"TakeProfitDefaultEnabled": False
},
"StandardAmounts": [
10000,
50000,
100000,
250000,
500000,
1000000,
2000000,
5000000,
10000000,
20000000
],
"SupportedOrderTypes": [
"Market",
"Limit"
],
"Symbol": "GBPCAD",
"TickSize": 0.0001,
"TickSizeLimitOrder": 0.0001,
"TickSizeStopOrder": 0.00005,
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
],
"TradableOn": [],
"TradingSignals": "NotAllowed",
"Uic": 23
},
{
"AmountDecimals": 6,
"AssetType": "FxKnockOutOption",
"CurrencyCode": "CAD",
"DefaultAmount": 100,
"Description": "British Pound/Canadian Dollar",
"Exchange": {
"CountryCode": "DK",
"ExchangeId": "SBFX",
"Name": "Inter Bank"
},
"Format": {
"Decimals": 4,
"OrderDecimals": 4,
"StrikeDecimals": 4
},
"FxForwardMaxForwardDate": "2020-03-10T00:00:00.000000Z",
"FxForwardMinForwardDate": "2019-03-06T00:00:00.000000Z",
"GroupId": 0,
"IncrementSize": 100,
"IsTradable": True,
"MinimumTradeSize": 1000,
"OptionsChainSubscriptionAllowed": True,
"OrderDistances": {
"EntryDefaultDistance": 0.25,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 0.5,
"StopLossDefaultDistanceType": "Percentage",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 0.5,
"TakeProfitDefaultDistanceType": "Percentage",
"TakeProfitDefaultEnabled": False
},
"StandardAmounts": [
10000,
30000,
50000,
80000,
100000
],
"SupportedOrderTypes": [
"Market",
"Limit",
"Stop",
"TrailingStop",
"StopLimit"
],
"Symbol": "GBPCAD",
"TickSize": 0.0001,
"TickSizeLimitOrder": 0.0001,
"TickSizeStopOrder": 0.00005,
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
],
"TradableOn": [
"9226397"
],
"TradingSignals": "NotAllowed",
"Uic": 23
},
{
"AmountDecimals": 6,
"AssetType": "FxKnockInOption",
"CurrencyCode": "CAD",
"DefaultAmount": 100,
"Description": "British Pound/Canadian Dollar",
"Exchange": {
"CountryCode": "DK",
"ExchangeId": "SBFX",
"Name": "Inter Bank"
},
"Format": {
"Decimals": 4,
"OrderDecimals": 4,
"StrikeDecimals": 4
},
"FxForwardMaxForwardDate": "2020-03-10T00:00:00.000000Z",
"FxForwardMinForwardDate": "2019-03-06T00:00:00.000000Z",
"GroupId": 0,
"IncrementSize": 100,
"IsTradable": True,
"MinimumTradeSize": 1000,
"OptionsChainSubscriptionAllowed": True,
"OrderDistances": {
"EntryDefaultDistance": 0.25,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 0.5,
"StopLossDefaultDistanceType": "Percentage",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 0.5,
"TakeProfitDefaultDistanceType": "Percentage",
"TakeProfitDefaultEnabled": False
},
"StandardAmounts": [
10000,
30000,
50000,
80000,
100000
],
"SupportedOrderTypes": [
"Market",
"Limit",
"Stop",
"TrailingStop",
"StopLimit"
],
"Symbol": "GBPCAD",
"TickSize": 0.0001,
"TickSizeLimitOrder": 0.0001,
"TickSizeStopOrder": 0.00005,
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
],
"TradableOn": [
"9226397"
],
"TradingSignals": "NotAllowed",
"Uic": 23
},
{
"AmountDecimals": 6,
"AssetType": "FxVanillaOption",
"CurrencyCode": "CAD",
"DefaultAmount": 100000,
"Description": "British Pound/Canadian Dollar",
"Exchange": {
"CountryCode": "DK",
"ExchangeId": "SBFX",
"Name": "Inter Bank"
},
"Format": {
"Decimals": 4,
"Format": "AllowDecimalPips",
"StrikeDecimals": 4
},
"FxForwardMaxForwardDate": "2020-03-10T00:00:00.000000Z",
"FxForwardMinForwardDate": "2019-03-06T00:00:00.000000Z",
"GroupId": 0,
"IncrementSize": 10000,
"IsTradable": True,
"MinimumTradeSize": 1000,
"OptionsChainSubscriptionAllowed": True,
"OrderDistances": {
"EntryDefaultDistance": 0.5,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 50,
"StopLossDefaultDistanceType": "Pips",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 50,
"TakeProfitDefaultDistanceType": "Pips",
"TakeProfitDefaultEnabled": False
},
"StandardAmounts": [
100000,
250000,
500000,
1000000,
2000000,
3000000,
4000000,
5000000,
10000000,
20000000
],
"SupportedOrderTypes": [
"Market",
"Limit",
"Stop",
"TrailingStop",
"StopLimit"
],
"Symbol": "GBPCAD",
"TickSize": 0.0005,
"TickSizeLimitOrder": 0.0001,
"TickSizeStopOrder": 0.00005,
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
],
"TradableOn": [
"9226397"
],
"TradingSignals": "NotAllowed",
"Uic": 23
},
{
"AmountDecimals": 6,
"AssetType": "FxSpot",
"CurrencyCode": "CAD",
"DefaultAmount": 100000,
"DefaultSlippage": 0.01,
"DefaultSlippageType": "Percentage",
"Description": "British Pound/Canadian Dollar",
"Exchange": {
"CountryCode": "DK",
"ExchangeId": "SBFX",
"Name": "Inter Bank"
},
"Format": {
"Decimals": 4,
"Format": "AllowDecimalPips",
"OrderDecimals": 4
},
"FxForwardMaxForwardDate": "2020-03-10T00:00:00.000000Z",
"FxForwardMinForwardDate": "2019-03-06T00:00:00.000000Z",
"FxSpotDate": "2019-03-06T00:00:00.000000Z",
"GroupId": 0,
"IncrementSize": 5000,
"IsTradable": True,
"MinimumTradeSize": 1000,
"OrderDistances": {
"EntryDefaultDistance": 0.5,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 50,
"StopLossDefaultDistanceType": "Pips",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 50,
"TakeProfitDefaultDistanceType": "Pips",
"TakeProfitDefaultEnabled": False
},
"StandardAmounts": [
10000,
50000,
100000,
250000,
500000,
1000000,
2000000,
5000000,
10000000,
20000000
],
"SupportedOrderTypes": [
"Market",
"Limit",
"Stop",
"TrailingStop",
"StopLimit"
],
"Symbol": "GBPCAD",
"TickSize": 0.00005,
"TickSizeLimitOrder": 0.0001,
"TickSizeStopOrder": 0.00005,
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
],
"TradableOn": [
"9226397"
],
"TradingSignals": "Allowed",
"Uic": 23
},
{
"AmountDecimals": 6,
"AssetType": "FxForwards",
"CurrencyCode": "CAD",
"DefaultAmount": 100000,
"DefaultSlippage": 0.01,
"DefaultSlippageType": "Percentage",
"Description": "British Pound/Canadian Dollar",
"Exchange": {
"CountryCode": "DK",
"ExchangeId": "SBFX",
"Name": "Inter Bank"
},
"Format": {
"Decimals": 4,
"Format": "AllowDecimalPips",
"OrderDecimals": 4
},
"FxForwardMaxForwardDate": "2020-03-10T00:00:00.000000Z",
"FxForwardMinForwardDate": "2019-03-06T00:00:00.000000Z",
"FxSpotDate": "2019-03-06T00:00:00.000000Z",
"GroupId": 0,
"IncrementSize": 5000,
"IsTradable": True,
"MinimumTradeSize": 1000,
"OrderDistances": {
"EntryDefaultDistance": 0.5,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 50,
"StopLossDefaultDistanceType": "Pips",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 50,
"TakeProfitDefaultDistanceType": "Pips",
"TakeProfitDefaultEnabled": False
},
"StandardAmounts": [
10000,
50000,
100000,
250000,
500000,
1000000,
2000000,
5000000,
10000000,
20000000
],
"SupportedOrderTypes": [
"Market",
"Limit",
"Stop",
"TrailingStop",
"StopLimit"
],
"Symbol": "GBPCAD",
"TickSize": 0.0001,
"TickSizeLimitOrder": 0.0001,
"TickSizeStopOrder": 0.00005,
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
],
"TradableOn": [
"9226397"
],
"TradingSignals": "NotAllowed",
"Uic": 23
}
]
}
},
"_v3_InstrumentDetails": {
"url": "/openapi/ref/v1/instruments/details/{Uic}/{AssetType}",
"params": {'AccountKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"AmountDecimals": 6,
"AssetType": "FxForwards",
"CurrencyCode": "CAD",
"DefaultAmount": 100000,
"DefaultSlippage": 0.01,
"DefaultSlippageType": "Percentage",
"Description": "British Pound/Canadian Dollar",
"Exchange": {
"CountryCode": "DK",
"ExchangeId": "SBFX",
"Name": "Inter Bank"
},
"Format": {
"Decimals": 4,
"Format": "AllowDecimalPips",
"OrderDecimals": 4
},
"FxForwardMaxForwardDate": "2020-03-10T00:00:00.000000Z",
"FxForwardMinForwardDate": "2019-03-06T00:00:00.000000Z",
"FxSpotDate": "2019-03-06T00:00:00.000000Z",
"GroupId": 0,
"IncrementSize": 5000,
"IsTradable": True,
"MinimumTradeSize": 1000,
"OrderDistances": {
"EntryDefaultDistance": 0.5,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 50,
"StopLossDefaultDistanceType": "Pips",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 50,
"TakeProfitDefaultDistanceType": "Pips",
"TakeProfitDefaultEnabled": False
},
"StandardAmounts": [
10000,
50000,
100000,
250000,
500000,
1000000,
2000000,
5000000,
10000000,
20000000
],
"SupportedOrderTypes": [
"Market",
"Limit",
"Stop",
"TrailingStop",
"StopLimit"
],
"Symbol": "GBPCAD",
"TickSize": 0.0001,
"TickSizeLimitOrder": 0.0001,
"TickSizeStopOrder": 0.00005,
"TradableAs": [
"FxSpot",
"FxForwards",
"FxVanillaOption",
"FxKnockInOption",
"FxKnockOutOption"
],
"TradableOn": [
"9226397"
],
"TradingSignals": "NotAllowed",
"Uic": 23
}
},
"_v3_ContractoptionSpaces": {
"url": "/openapi/ref/v1/instruments/contractoptionspaces/"
"{OptionRootID}/",
"params": {
"ExpiryDates": "2019-05-01",
"OptionSpaceSegment": "SpecificDates",
},
"response": {
"AmountDecimals": 0,
"AssetType": "StockOption",
"CanParticipateInMultiLegOrder": False,
"ContractSize": 100,
"CurrencyCode": "EUR",
"DefaultAmount": 1,
"DefaultExpiry": "2019-04-18T00:00:00Z",
"DefaultOption": {
"PutCall": "Call",
"StrikePrice": 27,
"Uic": 11897720,
"UnderlyingUic": 16350
},
"Description": "Royal Dutch Shell Plc A",
"Exchange": {
"CountryCode": "NL",
"ExchangeId": "EUR_AMS2",
"Name": "Euronext Equity & Index Derivatives - AMS"
},
"ExerciseStyle": "American",
"Format": {
"Decimals": 2,
"OrderDecimals": 2,
"StrikeDecimals": 3
},
"GroupId": 0,
"IncrementSize": 1,
"IsTradable": True,
"LotSize": 1,
"LotSizeType": "OddLotsNotAllowed",
"OptionRootId": 231,
"OptionSpace": [
{
"DisplayDaysToExpiry": 0,
"DisplayExpiry": "2019-03-01",
"Expiry": "2019-03-15",
"LastTradeDate": "2019-03-15T16:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 24,
"DisplayExpiry": "2019-04-01",
"Expiry": "2019-04-18",
"LastTradeDate": "2019-04-18T15:30:00.000000Z",
"SpecificOptions": [
{
"PutCall": "Call",
"StrikePrice": 24,
"Uic": 11897711,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 25,
"Uic": 11897712,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 26.5,
"Uic": 11897717,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 26,
"Uic": 11897719,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 27,
"Uic": 11897720,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 25,
"Uic": 11897721,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 24,
"Uic": 11897722,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 26,
"Uic": 11897723,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 28,
"Uic": 11897724,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 30,
"Uic": 11897725,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 27.5,
"Uic": 11897728,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 29,
"Uic": 11897729,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 26.5,
"Uic": 11897730,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 27,
"Uic": 11897731,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 27.5,
"Uic": 11897732,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 28,
"Uic": 11897733,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 30,
"Uic": 11897734,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 29,
"Uic": 11897735,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 23,
"Uic": 11900544,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 23,
"Uic": 11900558,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 25.5,
"Uic": 11903077,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 25.5,
"Uic": 11903078,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 22,
"Uic": 11949922,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 22,
"Uic": 11949924,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 32,
"Uic": 12003078,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 32,
"Uic": 12003081,
"UnderlyingUic": 16350
},
{
"PutCall": "Call",
"StrikePrice": 28.5,
"Uic": 12007474,
"UnderlyingUic": 16350
},
{
"PutCall": "Put",
"StrikePrice": 28.5,
"Uic": 12007478,
"UnderlyingUic": 16350
}
],
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 54,
"DisplayExpiry": "2019-05-01",
"Expiry": "2019-05-17",
"LastTradeDate": "2019-05-17T15:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 85,
"DisplayExpiry": "2019-06-01",
"Expiry": "2019-06-21",
"LastTradeDate": "2019-06-21T15:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 177,
"DisplayExpiry": "2019-09-01",
"Expiry": "2019-09-20",
"LastTradeDate": "2019-09-20T15:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 268,
"DisplayExpiry": "2019-12-01",
"Expiry": "2019-12-20",
"LastTradeDate": "2019-12-20T16:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 451,
"DisplayExpiry": "2020-06-01",
"Expiry": "2020-06-19",
"LastTradeDate": "2020-06-19T16:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 634,
"DisplayExpiry": "2020-12-01",
"Expiry": "2020-12-18",
"LastTradeDate": "2020-12-18T16:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 999,
"DisplayExpiry": "2021-12-01",
"Expiry": "2021-12-17",
"LastTradeDate": "2021-12-17T16:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 1364,
"DisplayExpiry": "2022-12-01",
"Expiry": "2022-12-16",
"LastTradeDate": "2022-12-16T16:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
},
{
"DisplayDaysToExpiry": 1729,
"DisplayExpiry": "2023-12-01",
"Expiry": "2023-12-15",
"LastTradeDate": "2023-12-15T16:30:00.000000Z",
"TickSizeScheme": {
"DefaultTickSize": 0.05,
"Elements": [
{
"HighPrice": 5,
"TickSize": 0.01
}
]
}
}
],
"OrderDistances": {
"EntryDefaultDistance": 0,
"EntryDefaultDistanceType": "Percentage",
"StopLimitDefaultDistance": 5,
"StopLimitDefaultDistanceType": "Pips",
"StopLossDefaultDistance": 0.5,
"StopLossDefaultDistanceType": "Percentage",
"StopLossDefaultEnabled": False,
"StopLossDefaultOrderType": "Stop",
"TakeProfitDefaultDistance": 0.5,
"TakeProfitDefaultDistanceType": "Percentage",
"TakeProfitDefaultEnabled": False
},
"PriceToContractFactor": 100,
"RelatedInstruments": [
{
"AssetType": "CfdOnStock",
"Uic": 16350
},
{
"AssetType": "Stock",
"Uic": 16350
}
],
"RelatedOptionRoots": [],
"SettlementStyle": "PhysicalDelivery",
"StandardAmounts": [
1,
5,
10,
25,
50,
100,
500,
1000
],
"SupportedOrderTypes": [
"Limit"
],
"Symbol": "RDSA:xams",
"TickSize": 0.01,
"TradableOn": [
"9300675"
],
"UnderlyingAssetType": "Stock"
}
},
"_v3_FuturesSpaces": {
"url": "/openapi/ref/v1/instruments/futuresspaces/"
"{ContinuousFuturesUic}",
"route": {'ContinuousFuturesUic': 28016},
"response": {
"BaseIdentifier": "W",
"Elements": [
{
"DaysToExpiry": 64,
"ExpiryDate": "2017-07-14",
"Symbol": "WQ7",
"Uic": 3406797
},
{
"DaysToExpiry": 127,
"ExpiryDate": "2017-09-15",
"Symbol": "WV7",
"Uic": 3844782
},
{
"DaysToExpiry": 188,
"ExpiryDate": "2017-11-15",
"Symbol": "WZ7",
"Uic": 4239352
},
{
"DaysToExpiry": 278,
"ExpiryDate": "2018-02-13",
"Symbol": "WH8",
"Uic": 4895721
},
{
"DaysToExpiry": 337,
"ExpiryDate": "2018-04-13",
"Symbol": "WK8",
"Uic": 5352847
},
{
"DaysToExpiry": 431,
"ExpiryDate": "2018-07-16",
"Symbol": "WQ8",
"Uic": 6112156
},
{
"DaysToExpiry": 491,
"ExpiryDate": "2018-09-14",
"Symbol": "WV8",
"Uic": 6609171
}
]
}
},
"_v3_TradingSchedule": {
"url": "/openapi/ref/v1/instruments/tradingschedule/{Uic}/{AssetType}",
"response": {
"Sessions": [
{
"EndTime": "2019-03-08T21:59:00.000000Z",
"StartTime": "2019-03-07T22:04:00.000000Z",
"State": "AutomatedTrading"
},
{
"EndTime": "2019-03-10T18:04:00.000000Z",
"StartTime": "2019-03-08T21:59:00.000000Z",
"State": "Closed"
},
{
"EndTime": "2019-03-11T20:59:00.000000Z",
"StartTime": "2019-03-10T18:04:00.000000Z",
"State": "AutomatedTrading"
}
],
"TimeZone": 3,
"TimeZoneAbbreviation": "EST",
"TimeZoneOffset": "-05:00:00"
}
}
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/responses/instruments.py
| 0.499512 | 0.342379 |
instruments.py
|
pypi
|
responses = {
"_v3_Currencies": {
"url": "/openapi/ref/v1/currencies",
"response": {
"Data": [
{
"CurrencyCode": "USD",
"Decimals": 2,
"Name": "US Dollar",
"Symbol": "$"
},
{
"CurrencyCode": "GBP",
"Decimals": 2,
"Name": "British Pound",
"Symbol": "£"
},
{
"CurrencyCode": "EUR",
"Decimals": 2,
"Name": "Euro",
"Symbol": "€"
},
{
"CurrencyCode": "CHF",
"Decimals": 2,
"Name": "Swiss Franc",
"Symbol": "Fr."
},
{
"CurrencyCode": "AUD",
"Decimals": 2,
"Name": "Australian Dollar",
"Symbol": "$"
},
{
"CurrencyCode": "CAD",
"Decimals": 2,
"Name": "Canadian Dollar",
"Symbol": "$"
},
{
"CurrencyCode": "NZD",
"Decimals": 2,
"Name": "New Zealand Dollar",
"Symbol": "$"
},
{
"CurrencyCode": "JPY",
"Decimals": 0,
"Name": "Japanese Yen",
"Symbol": "¥"
},
{
"CurrencyCode": "DKK",
"Decimals": 2,
"Name": "Danish Krone",
"Symbol": "kr."
},
{
"CurrencyCode": "SEK",
"Decimals": 2,
"Name": "Swedish Krona",
"Symbol": "kr"
},
{
"CurrencyCode": "...",
},
]
}
}
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/responses/currencies.py
| 0.551332 | 0.316647 |
currencies.py
|
pypi
|
responses = {
"_v3_ForwardTenorDates": {
"url": "/openapi/ref/v1/standarddates/forwardtenor/{Uic}",
"params": {'AccountKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"Data": [
{
"Date": "2019-03-06",
"Unit": "Days",
"Value": 0
},
{
"Date": "2019-03-13",
"Unit": "Weeks",
"Value": 1
},
{
"Date": "2019-03-20",
"Unit": "Weeks",
"Value": 2
},
{
"Date": "2019-03-27",
"Unit": "Weeks",
"Value": 3
},
{
"Date": "2019-04-08",
"Unit": "Months",
"Value": 1
},
{
"Date": "2019-05-07",
"Unit": "Months",
"Value": 2
},
{
"Date": "2019-06-06",
"Unit": "Months",
"Value": 3
},
{
"Date": "2019-09-06",
"Unit": "Months",
"Value": 6
},
{
"Date": "2019-12-06",
"Unit": "Months",
"Value": 9
},
{
"Date": "2020-03-06",
"Unit": "Years",
"Value": 1
}
]
}
},
"_v3_FXOptionExpiryDates": {
"url": "/openapi/ref/v1/standarddates/forwardtenor/{Uic}",
"response": {
"Data": [
{
"Date": "2019-03-07",
"Unit": "Days",
"Value": 1
},
{
"Date": "2019-03-13",
"Unit": "Weeks",
"Value": 1
},
{
"Date": "2019-03-20",
"Unit": "Weeks",
"Value": 2
},
{
"Date": "2019-03-27",
"Unit": "Weeks",
"Value": 3
},
{
"Date": "2019-04-04",
"Unit": "Months",
"Value": 1
},
{
"Date": "2019-05-06",
"Unit": "Months",
"Value": 2
},
{
"Date": "2019-06-06",
"Unit": "Months",
"Value": 3
},
{
"Date": "2019-09-05",
"Unit": "Months",
"Value": 6
},
{
"Date": "2019-12-05",
"Unit": "Months",
"Value": 9
},
{
"Date": "2020-03-05",
"Unit": "Years",
"Value": 1
}
]
}
}
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/responses/standarddates.py
| 0.490236 | 0.368122 |
standarddates.py
|
pypi
|
responses = {
"_v3_AlgoStrategies": {
"url": "/openapi/ref/v1/algostrategies",
"params": {'$top': '...', '$skip': '...'},
"response": {
"__count": 4,
"Data": [
{
"Description": "Group of VWAP",
"MinAmountUSD": 0,
"Name": "VWAP",
"Parameters": [],
"SupportedDurationTypes": [
"DayOrder"
],
"TradableInstrumentTypes": []
},
{
"Description": "Groups of Iceberg Strategies",
"MinAmountUSD": 0,
"Name": "Iceberg",
"Parameters": [],
"SupportedDurationTypes": [
"DayOrder"
],
"TradableInstrumentTypes": []
},
{
"Description": "Group of With Volume strategies",
"MinAmountUSD": 0,
"Name": "With Volume",
"Parameters": [],
"SupportedDurationTypes": [
"DayOrder"
],
"TradableInstrumentTypes": []
},
{
"Description": "Group of IS strategies",
"MinAmountUSD": 0,
"Name": "Implementation Shortfall",
"Parameters": [],
"SupportedDurationTypes": [
"DayOrder"
],
"TradableInstrumentTypes": []
}
]
}
},
"_v3_AlgoStrategyDetails": {
"url": "/openapi/ref/v1/algostrategies/{Name}",
"response": {
"Description": "Group of IS strategies",
"MinAmountUSD": 0,
"Name": "Implementation Shortfall",
"Parameters": [],
"SupportedDurationTypes": [
"DayOrder"
],
"TradableInstrumentTypes": []
}
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/referencedata/responses/algostrategies.py
| 0.538983 | 0.354712 |
algostrategies.py
|
pypi
|
from .base import Trading
from ..decorators import dyndoc_insert, endpoint
from .responses.optionschain import responses
@endpoint("openapi/trade/v1/optionschain/subscriptions", "POST", 201)
class OptionsChainSubscriptionCreate(Trading):
"""Create an active options chain subscription."""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a OptionsChainSubscriptionCreate request.
Parameters
----------
data : dict
the dict representing the parameters of the request body.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_OptionsChainSubscriptionCreate_body}
>>> r = tr.optionschain.OptionsChainSubscriptionCreate(data=data)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_OptionsChainSubscriptionCreate_body}
"""
super(OptionsChainSubscriptionCreate, self).__init__()
self.data = data
@endpoint("openapi/trade/v1/optionschain/subscriptions"
"{ContextId}/{ReferenceId}", "PATCH", 204)
class OptionsChainSubscriptionModify(Trading):
"""Modify an existing options chain subscription."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId, data):
"""Instantiate a OptionsChainSubscriptionModify request.
Parameters
----------
ContextId: string
the ContextId
ReferenceId: string
the ReferenceId
data : dict
dict representing the parameters of the request body.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ReferenceId = ...
>>> ContextId = ...
>>> data = {_v3_OptionsChainSubscriptionModify_body}
>>> r = tr.optionschain.OptionsChainSubscriptionModify(
... ReferenceId=ReferenceId,
... ContextId=ContextId,
... data=data)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(OptionsChainSubscriptionModify, self).__init__(
ReferenceId=ReferenceId,
ContextId=ContextId)
self.data = data
@endpoint("openapi/trade/v1/optionschain/subscriptions/"
"{ContextId}/{ReferenceId}", "DELETE", 202)
class OptionsChainSubscriptionRemove(Trading):
"""Remove an options chain subscription."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate a OptionsChainSubscriptionRemove request.
Parameters
----------
ContextId: string
the ContextId
ReferenceId: string
the ReferenceId
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ReferenceId = ...
>>> ContextId = ...
>>> r = tr.optionschain.OptionsChainSubscriptionRemove(
... ReferenceId=ReferenceId,
... ContextId=ContextId
... )
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(OptionsChainSubscriptionRemove, self).__init__(
ReferenceId=ReferenceId,
ContextId=ContextId)
@endpoint("openapi/trade/v1/optionschain/subscriptions/"
"{ContextId}/{ReferenceId}/ResetATM", "PUT", 204)
class OptionsChainSubscriptionResetATM(Trading):
"""Reset an options chain subscription 'At The Money'."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate an OptionsChainSubscriptionResetATM request.
Parameters
----------
ContextId: string
the ContextId
ReferenceId: string
the ReferenceId
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ReferenceId = ...
>>> ContextId = ...
>>> r = tr.optionschain.OptionsChainSubscriptionResetATM(
... ReferenceId=ReferenceId,
... ContextId=ContextId
... )
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(OptionsChainSubscriptionResetATM, self).__init__(
ReferenceId=ReferenceId,
ContextId=ContextId)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/trading/optionschain.py
| 0.733643 | 0.160463 |
optionschain.py
|
pypi
|
from .base import Trading
from ..decorators import dyndoc_insert, endpoint
from .responses.positions import responses
@endpoint("openapi/trade/v1/positions", "POST", 201)
class PositionByQuote(Trading):
"""Creates a new position by accepting a quote.
The quote must be the most recent one and it must be tradable:
(Quote.PriceType=PriceType.Tradable).
"""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a PositionByQuote request.
Parameters
----------
data : dict (required)
dict representing the data body.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_PositionByQuote_body}
>>> r = tr.positions.PositionByQuote(data=data)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_PositionByQuote_resp}
"""
super(PositionByQuote, self).__init__()
self.data = data
@endpoint("openapi/trade/v1/positions/{PositionId}", "PATCH", 204)
class UpdatePosition(Trading):
"""Updates properties of an existing position. This is only relevant for
FX Options, where you can update the Exercise method.
"""
@dyndoc_insert(responses)
def __init__(self, PositionId, data):
"""Instantiate an UpdatePosition request.
Parameters
----------
PositionId: string (required)
the position id
data : dict (required)
dict representing the data body.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_UpdatePosition_body}
>>> PositionId = 1019942425
>>> r = tr.positions.UpdatePosition(PositionId, data=data)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_UpdatePosition_resp}
"""
super(UpdatePosition, self).__init__(PositionId=PositionId)
self.data = data
@endpoint("openapi/trade/v1/positions/{PositionId}/exercise", "PUT", 204)
class ExercisePosition(Trading):
"""Forces exercise of a position. This is relevant for Futures Options,
Stock Options, Stock Index Options.
"""
@dyndoc_insert(responses)
def __init__(self, PositionId, data):
"""Instantiate an ExercisePosition request.
Parameters
----------
PositionId: string (required)
the position id
data : dict (required)
dict representing the data body.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_ExercisePosition_body}
>>> PositionId = 1019942425
>>> r = tr.positions.ExercisePosition(PositionId, data=data)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_ExercisePosition_resp}
"""
super(ExercisePosition, self).__init__(PositionId=PositionId)
self.data = data
@endpoint("openapi/trade/v1/positions/exercise", "PUT", 204)
class ExerciseAmount(Trading):
"""Forces exercise of an amount across all positions for the specified
UIC. This is relevant for Futures Options, Stock Options, Stock Index
Options.
"""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate an ExerciseAmount request.
Parameters
----------
data : dict (required)
dict representing the data body.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_ExerciseAmount_body}
>>> r = tr.positions.ExerciseAmount(data=data)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_ExerciseAmount_resp}
"""
super(ExerciseAmount, self).__init__()
self.data = data
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/trading/positions.py
| 0.682574 | 0.311535 |
positions.py
|
pypi
|
from .base import Trading
from ..decorators import dyndoc_insert, endpoint
from .responses.orders import responses
@endpoint("openapi/trade/v2/orders", "POST")
class Order(Trading):
"""Place a new order."""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate an Order request.
Parameters
----------
data : dict (required)
dict representing the data body, in this case an order spec.
OrderBody example::
data = {_v3_Order_body}
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = ...
>>> r = tr.orders.Order(data=data)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_Order_resp}
"""
super(Order, self).__init__()
self.data = data
@endpoint("openapi/trade/v2/orders", "PATCH")
class ChangeOrder(Trading):
"""Change one or more existing orders."""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a ChangeOrder request.
Parameters
----------
data : dict (required)
dict representing the data body, in this case an order change spec.
OrderBody example::
data = {_v3_ChangeOrder_body}
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = ...
>>> r = tr.orders.ChangeOrder(data=data)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_ChangeOrder_resp}
"""
super(ChangeOrder, self).__init__()
self.data = data
@endpoint("openapi/trade/v2/orders/{OrderIds}", "DELETE")
class CancelOrders(Trading):
"""Cancel one or more orders."""
@dyndoc_insert(responses)
def __init__(self, OrderIds, params):
"""Instantiate a CancelOrders request.
Parameters
----------
OrderIds: string (required)
',' delimited string with one or more orderId's
params : dict (required)
dict representing the querystring parameters.
params example::
params = {_v3_CancelOrders_params}
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> OrderIds="76289286"
>>> params = ...
>>> r = tr.orders.CancelOrders(OrderIds=OrderIds, params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_CancelOrders_resp}
"""
super(CancelOrders, self).__init__(OrderIds=OrderIds)
self.params = params
@endpoint("openapi/trade/v2/orders/precheck", "POST")
class PrecheckOrder(Trading):
"""Precheck an order."""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a PrecheckOrder request.
Parameters
----------
data : dict (required)
dict representing the data body, in this case an order change spec.
data example::
data = {_v3_PrecheckOrder_body}
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = ...
>>> r = tr.orders.PrecheckOrder(data=data)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_PrecheckOrder_resp}
"""
super(PrecheckOrder, self).__init__()
self.data = data
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/trading/orders.py
| 0.732592 | 0.219588 |
orders.py
|
pypi
|
from .base import Trading
from ..decorators import dyndoc_insert, endpoint
from .responses.infoprices import responses
@endpoint("openapi/trade/v1/infoprices")
class InfoPrice(Trading):
"""Gets an info price for an instrument using the specified
parameters.
"""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate an InfoPrice request.
Parameters
----------
params : dict (required)
dict representing the querystring parameters.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_InfoPrice_params}
>>> r = tr.infoprices.InfoPrice(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_InfoPrice_resp}
"""
super(InfoPrice, self).__init__()
self.params = params
@endpoint("openapi/trade/v1/infoprices/list")
class InfoPrices(Trading):
"""Gets a list of info prices for a list of instruments using the
specified parameters.
"""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate an InfoPrices request.
Parameters
----------
params : dict (required)
dict representing the querystring parameters.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_InfoPrices_params}
>>> r = tr.infoprices.InfoPrices(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_InfoPrices_resp}
"""
super(InfoPrices, self).__init__()
self.params = params
@endpoint("openapi/trade/v1/infoprices/subscriptions", "POST", 201)
class CreateInfoPriceSubscription(Trading):
"""Sets up a subscription and returns an initial snapshot of an info
price list specified by the parameters in the request."""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a CreateInfoPriceSubscription request.
Parameters
----------
data : dict (required)
dict representing the parameters of the data body.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_CreateInfoPriceSubscription_body}
>>> r = tr.infoprices.CreateInfoPriceSubscription(data=data)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_CreateInfoPriceSubscription_resp}
"""
super(CreateInfoPriceSubscription, self).__init__()
self.data = data
@endpoint("openapi/trade/v1/infoprices/subscriptions/{ContextId}",
"DELETE", 202)
class RemoveInfoPriceSubscriptionsByTag(Trading):
"""Remove one or more infoprice subscriptions."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, params=None):
"""Instantiate a RemoveInfoPriceSubscriptionsByTag request.
Parameters
----------
ContextId: string (required)
the context id.
params : dict (required)
dict representing the querystring parameters.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ContextId = 'ctxt_20190316'
>>> params = {_v3_RemoveInfoPriceSubscriptionsByTag_params}
>>> r = tr.infoprices.RemoveInfoPriceSubscriptionsByTag(
... ContextId=ContextId,
... params=params)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(RemoveInfoPriceSubscriptionsByTag, self).__init__(
ContextId=ContextId)
self.params = params
@endpoint("openapi/trade/v1/infoprices/subscriptions/"
"{ContextId}/{ReferenceId}",
"DELETE", 202)
class RemoveInfoPriceSubscriptionById(Trading):
"""Remove an info price subscription on a single instrument."""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate a RemoveInfoPricesSubscriptionById request.
Parameters
----------
ContextId: string (required)
the context id.
ReferenceId: string (required)
the ReferenceId id.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ContextId = 'ctxt_20190316'
>>> ReferenceId = 'pri_01'
>>> r = tr.infoprices.RemoveInfoPriceSubscriptionById(
... ContextId=ContextId,
... ReferenceId=ReferenceId)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(RemoveInfoPriceSubscriptionById, self).__init__(
ContextId=ContextId,
ReferenceId=ReferenceId)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/trading/infoprices.py
| 0.778102 | 0.257497 |
infoprices.py
|
pypi
|
from .base import Trading
from ..decorators import dyndoc_insert, endpoint
from .responses.prices import responses
@endpoint("openapi/trade/v1/prices/subscriptions", "POST", 201)
class CreatePriceSubscription(Trading):
"""Sets up an active price subscription on an instrument and returns an
initial snapshot of the most recent price.
"""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate a CreatePriceSubscription request.
Parameters
----------
data : dict (required)
dict representing the data body, in this case an order spec.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_CreatePriceSubscription_body}
>>> r = tr.prices.CreatePriceSubscription(data=data)
>>> client.request(r)
>>> print(json.dumps(rv, indent=2))
::
{_v3_CreatePriceSubscription_resp}
"""
super(CreatePriceSubscription, self).__init__()
self.data = data
@endpoint("openapi/trade/v1/prices/subscriptions/{ContextId}/{ReferenceId}",
"PUT", 204)
class MarginImpactRequest(Trading):
"""Request margin impact to come on one of the next following price
updates.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate a MarginImpactRequest request.
Parameters
----------
ContextId : string (required)
the ContextId
ReferenceId : string (required)
the ReferenceId
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ContextId = "ctxt_20190311"
>>> ReferenceId = "EURUSD"
>>> r = tr.prices.MarginImpactRequest(ContextId=ContextId,
... ReferenceId=ReferenceId)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(MarginImpactRequest, self).__init__(ContextId=ContextId,
ReferenceId=ReferenceId)
@endpoint("openapi/trade/v1/prices/subscriptions/{ContextId}/",
"DELETE", 202)
class PriceSubscriptionRemoveByTag(Trading):
"""Remove multiple subscriptions for the given ContextId, optionally
marked with a specific tag.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, params=None):
"""Instantiate a PriceSubscriptionRemoveByTag request.
Parameters
----------
ContextId: string (required)
the ContextId
params: dict (optional)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_PriceSubscriptionRemoveByTag_params}
>>> ContextId = ...
>>> r = tr.prices.PriceSubscriptionRemoveByTag(ContextId,
... params=params)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(PriceSubscriptionRemoveByTag, self).__init__(ContextId=ContextId)
self.params = params
@endpoint("openapi/trade/v1/prices/subscriptions/{ContextId}/{ReferenceId}",
"DELETE", 202)
class PriceSubscriptionRemove(Trading):
"""Removes subscription for the current session identified by
subscription id.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate a PriceSubscriptionRemove request.
Parameters
----------
ContextId: string (required)
the ContextId
ReferenceId: string (required)
the ReferenceId
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.trading as tr
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ContextId = ...
>>> ReferenceId = ...
>>> r = tr.prices.PriceSubscriptionRemove(ContextId, ReferenceId)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(PriceSubscriptionRemove, self).__init__(
ContextId=ContextId,
ReferenceId=ReferenceId)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/trading/prices.py
| 0.698432 | 0.218461 |
prices.py
|
pypi
|
responses = {
"_v3_InfoPrice": {
"url": "openapi/trade/v1/infoprices",
"params": {
'Uic': 22,
'AccountKey': '81ef1924-c25f-43fe-90ff-028e3fe249f2',
'AssetType': '...',
'<other-parms>': '...',
},
"response": {
"AssetType": "FxSpot",
"DisplayAndFormat": {
"Currency": "AUD",
"Decimals": 4,
"Description": "British Pound/Australian Dollar",
"Format": "AllowDecimalPips",
"OrderDecimals": 4,
"Symbol": "GBPAUD"
},
"HistoricalChanges": {
"PercentChange1Month": 1.21,
"PercentChange2Months": 2.95,
"PercentChange3Months": 1.85,
"PercentChange6Months": -1.83,
"PercentChangeWeekly": 1.67
},
"InstrumentPriceDetails": {
"IsMarketOpen": True,
"ShortTradeDisabled": False,
"ValueDate": "2017-05-19"
},
"LastUpdated": "0001-01-01T00:00:00Z",
"PriceInfo": {
"High": 1.09117,
"Low": 1.08853,
"NetChange": 0.00048,
"PercentChange": 0.04
},
"PriceInfoDetails": {
"AskSize": 1000000.0,
"BidSize": 1000000.0,
"LastClose": 1.08932,
"LastTraded": 0.0,
"LastTradedSize": 0.0,
"Open": 0.0,
"Volume": 0.0
},
"Quote": {
"Amount": 100000,
"Ask": 1.74948,
"Bid": 1.74858,
"DelayedByMinutes": 15,
"ErrorCode": "None",
"Mid": 1.74903,
"PriceTypeAsk": "Indicative",
"PriceTypeBid": "Indicative"
},
"Uic": 22
}
},
"_v3_InfoPrices": {
"url": "openapi/trade/v1/infoprices/list",
"params": {
'Uics': '22,23',
'AccountKey': '1a463418-88d4-4555-92e3-e6004d675245',
'<other-parms>': '...',
},
"response": {
"Data": [
{
"AssetType": "FxSpot",
"DisplayAndFormat": {
"Currency": "AUD",
"Decimals": 4,
"Description": "British Pound/Australian Dollar",
"Format": "AllowDecimalPips",
"OrderDecimals": 4,
"Symbol": "GBPAUD"
},
"HistoricalChanges": {
"PercentChange1Month": 1.21,
"PercentChange2Months": 2.95,
"PercentChange3Months": 1.85,
"PercentChange6Months": -1.83,
"PercentChangeWeekly": 1.67
},
"InstrumentPriceDetails": {
"IsMarketOpen": True,
"ShortTradeDisabled": False,
"ValueDate": "2017-05-19"
},
"LastUpdated": "0001-01-01T00:00:00Z",
"PriceInfo": {
"High": 1.09117,
"Low": 1.08853,
"NetChange": 0.00048,
"PercentChange": 0.04
},
"PriceInfoDetails": {
"AskSize": 1000000.0,
"BidSize": 1000000.0,
"LastClose": 1.08932,
"LastTraded": 0.0,
"LastTradedSize": 0.0,
"Open": 0.0,
"Volume": 0.0
},
"Quote": {
"Amount": 100000,
"Ask": 1.74948,
"Bid": 1.74858,
"DelayedByMinutes": 15,
"ErrorCode": "None",
"Mid": 1.74903,
"PriceTypeAsk": "Indicative",
"PriceTypeBid": "Indicative"
},
"Uic": 22
},
{
"AssetType": "FxSpot",
"DisplayAndFormat": {
"Currency": "CAD",
"Decimals": 4,
"Description": "British Pound/Canadian Dollar",
"Format": "AllowDecimalPips",
"OrderDecimals": 4,
"Symbol": "GBPCAD"
},
"InstrumentPriceDetails": {
"IsMarketOpen": True,
"ShortTradeDisabled": False,
"ValueDate": "2017-05-19"
},
"LastUpdated": "0001-01-01T00:00:00Z",
"Quote": {
"Amount": 100000,
"Ask": 1.76278,
"Bid": 1.76198,
"DelayedByMinutes": 15,
"ErrorCode": "None",
"Mid": 1.76238,
"PriceTypeAsk": "Indicative",
"PriceTypeBid": "Indicative"
},
"Uic": 23
}
]
}
},
"_v3_CreateInfoPriceSubscription": {
"url": "openapi/trade/v1/infoprices/subscriptions",
"body": {
"Arguments": {
"AccountKey": "LZTc7DdejXODf-WSl2aCyQ==",
"AssetType": "FxSpot",
"FieldGroups": [
"DisplayAndFormat",
"HistoricalChanges",
"InstrumentPriceDetails",
"PriceInfo",
"PriceInfoDetails",
"Quote"
],
"Uics": "22,23"
},
"ContextId": "20190307094456688",
"ReferenceId": "IP17820",
"RefreshRate": 1000
},
"response": {
"ContextId": "20190307094456688",
"Format": "application/json",
"InactivityTimeout": 60,
"ReferenceId": "IP17820",
"RefreshRate": 1000,
"Snapshot": {
"Data": [
{
"AssetType": "FxSpot",
"DisplayAndFormat": {
"Currency": "AUD",
"Decimals": 4,
"Description": "British Pound/Australian Dollar",
"Format": "AllowDecimalPips",
"OrderDecimals": 4,
"Symbol": "GBPAUD"
},
"HistoricalChanges": {
"PercentChange1Month": 1.21,
"PercentChange2Months": 2.95,
"PercentChange3Months": 1.85,
"PercentChange6Months": -1.83,
"PercentChangeWeekly": 1.67
},
"InstrumentPriceDetails": {
"IsMarketOpen": True,
"ShortTradeDisabled": False,
"ValueDate": "2017-05-19"
},
"LastUpdated": "0001-01-01T00:00:00Z",
"PriceInfo": {
"High": 1.09117,
"Low": 1.08853,
"NetChange": 0.00048,
"PercentChange": 0.04
},
"PriceInfoDetails": {
"AskSize": 1000000.0,
"BidSize": 1000000.0,
"LastClose": 1.08932,
"LastTraded": 0.0,
"LastTradedSize": 0.0,
"Open": 0.0,
"Volume": 0.0
},
"Quote": {
"Amount": 100000,
"Ask": 1.74948,
"Bid": 1.74858,
"DelayedByMinutes": 15,
"ErrorCode": "None",
"Mid": 1.74903,
"PriceTypeAsk": "Indicative",
"PriceTypeBid": "Indicative"
},
"Uic": 22
},
{
"AssetType": "FxSpot",
"DisplayAndFormat": {
"Currency": "CAD",
"Decimals": 4,
"Description": "British Pound/Canadian Dollar",
"Format": "AllowDecimalPips",
"OrderDecimals": 4,
"Symbol": "GBPCAD"
},
"InstrumentPriceDetails": {
"IsMarketOpen": True,
"ShortTradeDisabled": False,
"ValueDate": "2017-05-19"
},
"LastUpdated": "0001-01-01T00:00:00Z",
"Quote": {
"Amount": 100000,
"Ask": 1.76278,
"Bid": 1.76198,
"DelayedByMinutes": 15,
"ErrorCode": "None",
"Mid": 1.76238,
"PriceTypeAsk": "Indicative",
"PriceTypeBid": "Indicative"
},
"Uic": 23
}
]
},
"State": "Active"
}
},
"_v3_RemoveInfoPriceSubscriptionsByTag": {
"url": "openapi/trade/v1/infoprices/subscriptions",
"route": {
'ContextId': 'ctxt_20190316',
},
"params": {
"Tag": "IP"
},
"response": ''
},
"_v3_RemoveInfoPriceSubscriptionById": {
"url": "openapi/trade/v1/infoprices/subscriptions",
"route": {
'ContextId': 'ctxt_20190316',
'ReferenceId': 'IP_EURUSD'
},
"response": ''
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/trading/responses/infoprices.py
| 0.522933 | 0.420124 |
infoprices.py
|
pypi
|
responses = {
"_v3_GetTradeMessages": {
"url": "openapi/trade/v1/messages",
"response": {
"Data": [
{
"DateTime": "2014-12-12T09:17:12Z",
"DisplayName": "Price Alert",
"DisplayType": "Default",
"IsDiscardable": False,
"MessageBody": "Price alert was triggered on EURUSD",
"MessageHeader": "Price Alert",
"MessageId": "345322",
"MessageType": "PriceAlert"
}
]
}
},
"_v3_MarkMessageAsSeen": {
"url": "openapi/trade/v1/messages/seen/{MessageId}",
"route": {
"MessageId": 345322
},
"response": ''
},
"_v3_CreateTradeMessageSubscription": {
"url": "openapi/trade/v1/messages/subscriptions/",
"body": {
"ContextId": "20190307094456781",
"Format": "application/json",
"ReferenceId": "TM90172",
"RefreshRate": 5,
"Tag": "PAGE1"
},
"response": {
"ContextId": "20190307094456781",
"Format": "application/json",
"InactivityTimeout": 120,
"ReferenceId": "TM90172",
"RefreshRate": 800,
"Snapshot": {
"Data": [
{
"DateTime": "2014-12-12T09:17:12Z",
"DisplayName": "Price Alert",
"DisplayType": "Default",
"IsDiscardable": False,
"MessageBody": "Price alert was triggered on EURUSD",
"MessageHeader": "Price Alert",
"MessageId": "345322",
"MessageType": "PriceAlert"
}
]
},
"State": "Active",
"Tag": "PAGE1"
}
},
"_v3_RemoveTradeMessageSubscriptionById": {
"url": "openapi/trade/v1/messages/subscriptions/"
"{ContextId}/{ReferenceId}",
"route": {
"ContextId": "ctxt_20190318",
"ReferenceId": "M452"
},
"response": ""
},
"_v3_RemoveTradeMessageSubscriptions": {
"url": "openapi/trade/v1/messages/subscriptions/{ContextId}/",
"route": {
"ContextId": "ctxt_20190318",
},
"params": {
"Tag": "CORE"
},
"response": ""
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/trading/responses/messages.py
| 0.412294 | 0.245057 |
messages.py
|
pypi
|
from ..decorators import dyndoc_insert, endpoint
from .base import Portfolio
from .responses.closedpositions import responses
@endpoint("openapi/port/v1/closedpositions/")
class ClosedPositionList(Portfolio):
"""Returns a list of closed positions fulfilling the criteria
specified by the query string parameters.
"""
@dyndoc_insert(responses)
def __init__(self, params=None):
"""Instantiate a ClosedPositionList request.
Parameters
----------
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_ClosedPositionList_params}
>>> r = pf.closedpositions.ClosedPositionList(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
::
{_v3_ClosedPositionList_resp}
"""
super(ClosedPositionList, self).__init__()
self.params = params
@endpoint("openapi/port/v1/closedpositions/{ClosedPositionId}")
class ClosedPositionById(Portfolio):
"""Get a single position by the ClosedPositionId."""
@dyndoc_insert(responses)
def __init__(self, ClosedPositionId, params):
"""Instantiate a ClosedPositionById request.
Parameters
----------
ClosedPositionId: string (required)
the ClosedPositionId
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ClosedPositionId = '212702698-212702774'
>>> params = {_v3_ClosedPositionById_params}
>>> r = pf.closedpositions.ClosedPositionById(
... ClosedPositionId=ClosedPositionId,
... params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
::
{_v3_ClosedPositionById_resp}
"""
super(ClosedPositionById, self).__init__(
ClosedPositionId=ClosedPositionId)
self.params = params
@endpoint("openapi/port/v1/closedpositions/{ClosedPositionId}/details/")
class ClosedPositionDetails(Portfolio):
"""Gets detailed information about a single position as specified by
the query parameters
"""
@dyndoc_insert(responses)
def __init__(self, ClosedPositionId, params=None):
"""Instantiate a ClosedPositionDetails request.
Parameters
----------
ClosedPositionId: string (required)
the ClosedPositionId
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ClosedPositionId = '212702698-212702774'
>>> params = {_v3_ClosedPositionDetails_params}
>>> r = pf.closedpositions.ClosedPositionDetails(
... ClosedPositionId=ClosedPositionId,
... params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
::
{_v3_ClosedPositionDetails_resp}
"""
super(ClosedPositionDetails, self).__init__(
ClosedPositionId=ClosedPositionId)
self.params = params
@endpoint("openapi/port/v1/closedpositions/me")
class ClosedPositionsMe(Portfolio):
"""Returns a list of closed positions fulfilling the criteria specified
by the query string parameters.
"""
@dyndoc_insert(responses)
def __init__(self, params=None):
"""Instantiate a ClosedPositionsMe request.
Parameters
----------
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_ClosedPositionsMe_params}
>>> r = pf.closedpositions.ClosedPositionsMe(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
::
{_v3_ClosedPositionsMe_resp}
"""
super(ClosedPositionsMe, self).__init__()
self.params = params
@endpoint("openapi/port/v1/closedpositions/subscriptions/", "POST", 201)
class ClosedPositionSubscription(Portfolio):
"""Sets up a subscription and returns an initial snapshot of list of
closed positions specified by the parameters in the request.
"""
@dyndoc_insert(responses)
def __init__(self, data, params=None):
"""Instantiate a ClosedPositionSubscription request.
Parameters
----------
data: dict (required)
dict representing the parameters of the data body
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_ClosedPositionSubscription_params}
>>> data = {_v3_ClosedPositionSubscription_body}
>>> r = pf.closedpositions.ClosedPositionSubscription(
... data=data,
... params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_ClosedPositionSubscription_resp}
"""
super(ClosedPositionSubscription, self).__init__()
self.data = data
self.params = params
@endpoint("openapi/port/v1/closedpositions/subscriptions/"
"{ContextId}/{ReferenceId}", "PATCH", 200)
class ClosedPositionSubscriptionUpdate(Portfolio):
"""Extends or reduces the page size, number of positions shown, on
a running closed positions subscription. When expanding the page size,
the subsequent closed positions are streamed so to avoid race conditions.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId, data):
"""Instantiate a ClosedPositionSubscriptionUpdate request.
Parameters
----------
ContextId: string (required)
the ContextId
ReferenceId: string (required)
the ReferenceId
data: dict (required)
dict representing the parameters of the data body
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_ClosedPositionSubscriptionUpdate_body}
>>> ContextId = ''
>>> ReferenceId = ''
>>> r = pf.closedpositions.ClosedPositionSubscriptionUpdate(
... ContextId=ContextId,
... ReferenceId=ReferenceId,
... data=data)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(ClosedPositionSubscriptionUpdate, self).__init__(
ContextId=ContextId,
ReferenceId=ReferenceId)
self.data = data
@endpoint("openapi/port/v1/closedpositions/"
"subscriptions/{ContextId}", "DELETE", 202)
class ClosedPositionSubscriptionsRemove(Portfolio):
"""Removes multiple all subscriptions for the current session on this
resource, and frees all resources on the server.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, params=None):
"""Instantiate a ClosedPositionSubscriptionsRemove request.
Parameters
----------
ContextId: string (required)
the ContextId
params: dict (required)
dict representing the parameters of the querystring
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> r = pf.closedpositions.ClosedPositionSubscriptionsRemove(
... ContextId=ContextId,
... params=params)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(ClosedPositionSubscriptionsRemove, self).__init__(
ContextId=ContextId)
self.params = params
@endpoint("openapi/port/v1/closedpositions/"
"subscriptions/{ContextId}/{ReferenceId}", "DELETE", 202)
class ClosedPositionSubscriptionRemoveById(Portfolio):
"""Removes subscription for the current session identified by
subscription id.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate a ClosedPositionSubscriptionRemoveById request.
Parameters
----------
ContextId: string (required)
the ContextId
ReferenceId: string (required)
the ReferenceId
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> r = pf.closedpositions.ClosedPositionSubscriptionRemoveById(
... ContextId=ContextId,
... ReferenceId=ReferenceId)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(ClosedPositionSubscriptionRemoveById, self).__init__(
ContextId=ContextId,
ReferenceId=ReferenceId)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/portfolio/closedpositions.py
| 0.768168 | 0.247282 |
closedpositions.py
|
pypi
|
from ..decorators import dyndoc_insert, endpoint
from .base import Portfolio
from .responses.balances import responses
@endpoint("openapi/port/v1/balances/me")
class AccountBalancesMe(Portfolio):
"""Get balance data for a client or an account. Defaults to
logged-in client.
"""
@dyndoc_insert(responses)
def __init__(self):
"""Instantiate an AccountBalancesMe request.
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> r = pf.balances.AccountBalancesMe()
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_AccountBalancesMe_resp}
"""
super(AccountBalancesMe, self).__init__()
@endpoint("openapi/port/v1/balances")
class AccountBalances(Portfolio):
"""Get balance data for a client, account group or an account."""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate an AccountBalance request.
Parameters
----------
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_AccountBalances_params}
>>> r = pf.balances.AccountBalances(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_AccountBalances_resp}
"""
super(AccountBalances, self).__init__()
self.params = params
@endpoint("openapi/port/v1/balances/marginoverview/")
class MarginOverview(Portfolio):
"""Get margin overview for a client, account group or an account."""
@dyndoc_insert(responses)
def __init__(self, params):
"""Instantiate an MarginOverview request.
Parameters
----------
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_MarginOverview_params}
>>> r = pf.balances.MarginOverview(params=params)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_MarginOverview_resp}
"""
super(MarginOverview, self).__init__()
self.params = params
@endpoint("openapi/port/v1/balances/subscriptions", "POST", 201)
class BalanceSubscriptionCreate(Portfolio):
"""Set up a subscription and returns an initial snapshot of a balance."""
@dyndoc_insert(responses)
def __init__(self, data):
"""Instantiate an BalanceSubscriptionCreate request.
Parameters
----------
data: dict (required)
dict representing the data body parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> data = {_v3_BalanceSubscriptionCreate_body}
>>> r = pf.balances.BalanceSubscriptionCreate(data=data)
>>> client.request(r)
>>> print(json.dumps(r.response, indent=4))
Output::
{_v3_BalanceSubscriptionCreate_resp}
"""
super(BalanceSubscriptionCreate, self).__init__()
self.data = data
@endpoint("openapi/port/v1/balances/subscriptions/{ContextId}", "DELETE", 201)
class BalanceSubscriptionRemoveByTag(Portfolio):
"""Remove multiple subscriptions for the current session on this
resource and frees all resources on the server.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, params):
"""Instantiate an BalanceSubscriptionRemoveByTag request.
Parameters
----------
ContextId: string (required)
the ContextId
params: dict (required)
dict representing the querystring parameters
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> params = {_v3_BalanceSubscriptionRemoveByTag_params}
>>> ContextId = "explorer_1551792578055"
>>> r = pf.BalanceSubscriptionRemoveByTag(ContextId=ContextId,
... params=params)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(BalanceSubscriptionRemoveByTag, self).__init__(
ContextId=ContextId)
self.params = params
@endpoint("openapi/port/v1/balances/subscriptions/"
"{ContextId}/{ReferenceId}", "DELETE", 201)
class BalanceSubscriptionRemoveById(Portfolio):
"""Removes subscription for the current session identified by
subscription id.
"""
RESPONSE_DATA = None
@dyndoc_insert(responses)
def __init__(self, ContextId, ReferenceId):
"""Instantiate an BalanceSubscriptionRemoveById request.
Parameters
----------
ContextId: string (required)
the ContextId
ReferenceId: string (required)
the ReferenceId
>>> import saxo_openapi
>>> import saxo_openapi.endpoints.portfolio as pf
>>> import json
>>> client = saxo_openapi.API(access_token=...)
>>> ContextId = "explorer_1551792578055"
>>> ReferenceId = "G_953"
>>> r = pf.BalanceSubscriptionRemoveById(ContextId=ContextId,
... ReferenceId=ReferenceId)
>>> client.request(r)
>>> assert r.status_code == r.expected_status
No data is returned.
"""
super(BalanceSubscriptionRemoveById, self).__init__(
ContextId=ContextId,
ReferenceId=ReferenceId)
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/portfolio/balances.py
| 0.762247 | 0.154185 |
balances.py
|
pypi
|
responses = {
"_v3_ClosedPositionList": {
"url": "/openapi/port/v1/closedpositions/",
"params": {},
"response": {
"__count": 5,
"Data": [
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 80000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": -260,
"ClosedProfitLossInBaseCurrency": -171.1138,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212702774",
"ClosingPrice": 1.75612,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": False,
"CostClosing": -7.02,
"CostClosingInBaseCurrency": -4.62,
"CostOpening": -7.04,
"CostOpeningInBaseCurrency": -4.63,
"ExecutionTimeClose": "2019-03-05T22:57:51.935866Z",
"ExecutionTimeOpen": "2019-03-05T22:39:43.738721Z",
"OpeningPositionId": "212702698",
"OpenPrice": 1.75937,
"Uic": 23
},
"ClosedPositionUniqueId": "212702698-212702774",
"NetPositionId": "GBPCAD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": -100000,
"AssetType": "FxSpot",
"BuyOrSell": "Sell",
"ClientId": "9226397",
"ClosedProfitLoss": 29,
"ClosedProfitLossInBaseCurrency": 25.6447,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212702772",
"ClosingPrice": 1.13025,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": False,
"CostClosing": -5.65,
"CostClosingInBaseCurrency": -5,
"CostOpening": -5.65,
"CostOpeningInBaseCurrency": -5,
"ExecutionTimeClose": "2019-03-05T22:57:51.776721Z",
"ExecutionTimeOpen": "2019-03-05T22:39:43.546536Z",
"OpeningPositionId": "212702696",
"OpenPrice": 1.13054,
"Uic": 21
},
"ClosedPositionUniqueId": "212702696-212702772",
"NetPositionId": "EURUSD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 10000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": -13.2,
"ClosedProfitLossInBaseCurrency": -11.67276,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212702680",
"ClosingPrice": 1.31731,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -3,
"CostClosingInBaseCurrency": -2.65,
"CostOpening": -3,
"CostOpeningInBaseCurrency": -2.65,
"ExecutionTimeClose": "2019-03-05T22:23:38.888231Z",
"ExecutionTimeOpen": "2019-03-04T17:11:39.129241Z",
"OpeningPositionId": "212675868",
"OpenPrice": 1.31863,
"Uic": 31
},
"ClosedPositionUniqueId": "212675868-212702680",
"NetPositionId": "GBPUSD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": 50,
"ClosedProfitLossInBaseCurrency": 32.9065,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212702664",
"ClosingPrice": 1.75878,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -8.79,
"CostClosingInBaseCurrency": -5.78,
"CostOpening": -8.79,
"CostOpeningInBaseCurrency": -5.82,
"ExecutionTimeClose": "2019-03-05T22:22:51.922693Z",
"ExecutionTimeOpen": "2019-03-03T23:34:51.823660Z",
"OpeningPositionId": "212550210",
"OpenPrice": 1.75828,
"Uic": 23
},
"ClosedPositionUniqueId": "212550210-212702664",
"NetPositionId": "GBPCAD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 400000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": -1800,
"ClosedProfitLossInBaseCurrency": -1118.124,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212702660",
"ClosingPrice": 1.85952,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -37.19,
"CostClosingInBaseCurrency": -23.1,
"CostOpening": -29.824,
"CostOpeningInBaseCurrency": -18.62,
"ExecutionTimeClose": "2019-03-05T22:22:07.523028Z",
"ExecutionTimeOpen": "2019-03-03T23:35:08.243690Z",
"OpeningPositionId": "212550212",
"OpenPrice": 1.86402,
"Uic": 22
},
"ClosedPositionUniqueId": "212550212-212702660",
"NetPositionId": "GBPAUD__FxSpot"
}
]
}
},
"_v3_ClosedPositionById": {
"url": "/openapi/port/v1/closedpositions/{ClosedPositionId}",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 80000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": -260,
"ClosedProfitLossInBaseCurrency": -171.0969,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212702774",
"ClosingPrice": 1.75612,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": False,
"CostClosing": -7.02,
"CostClosingInBaseCurrency": -4.62,
"CostOpening": -7.04,
"CostOpeningInBaseCurrency": -4.63,
"ExecutionTimeClose": "2019-03-05T22:57:51.935866Z",
"ExecutionTimeOpen": "2019-03-05T22:39:43.738721Z",
"OpeningPositionId": "212702698",
"OpenPrice": 1.75937,
"Uic": 23
},
"ClosedPositionUniqueId": "212702698-212702774",
"NetPositionId": "GBPCAD__FxSpot"
}
},
"_v3_ClosedPositionDetails": {
"url": "/openapi/port/v1/closedpositions/{ClosedPositionId}/details",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 80000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": -260,
"ClosedProfitLossInBaseCurrency": -171.1385,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212702774",
"ClosingPrice": 1.75612,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": False,
"CostClosing": -7.02,
"CostClosingInBaseCurrency": -4.62,
"CostOpening": -7.04,
"CostOpeningInBaseCurrency": -4.63,
"ExecutionTimeClose": "2019-03-05T22:57:51.935866Z",
"ExecutionTimeOpen": "2019-03-05T22:39:43.738721Z",
"OpeningPositionId": "212702698",
"OpenPrice": 1.75937,
"Uic": 23
},
"ClosedPositionDetails": {
"CostClosing": {
"Commission": -7.02
},
"CostClosingInBaseCurrency": {
"Commission": -4.62
},
"CostOpening": {
"Commission": -7.04
},
"CostOpeningInBaseCurrency": {
"Commission": -4.63
},
"CurrencyConversionRateInstrumentToBaseClosing": 0.658225,
"CurrencyConversionRateInstrumentToBaseOpening": 0.658225,
"ValueDateClose": "2019-03-08T00:00:00.000000Z",
"ValueDateOpen": "2019-03-08T00:00:00.000000Z"
},
"ClosedPositionUniqueId": "212702698-212702774",
"DisplayAndFormat": {
"Currency": "CAD",
"Decimals": 4,
"Description": "British Pound/Canadian Dollar",
"Format": "AllowDecimalPips",
"Symbol": "GBPCAD"
},
"Exchange": {
"Description": "Inter Bank",
"ExchangeId": "SBFX",
"IsOpen": True
},
"NetPositionId": "GBPCAD__FxSpot"
}
},
"_v3_ClosedPositionsMe": {
"url": "/openapi/port/v1/closedpositions/me",
"params": {},
"response": {
"__count": 3,
"Data": [
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": -40000,
"AssetType": "FxSpot",
"BuyOrSell": "Sell",
"ClientId": "9226397",
"ClosedProfitLoss": -582.8,
"ClosedProfitLossInBaseCurrency": -383.377496,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212725160",
"ClosingPrice": 1.77074,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -4.03,
"CostClosingInBaseCurrency": -2.65,
"CostOpening": -3.51,
"CostOpeningInBaseCurrency": -2.32,
"ExecutionTimeClose": "2019-03-06T23:07:47.040598Z",
"ExecutionTimeOpen": "2019-03-06T10:24:50.635259Z",
"OpeningPositionId": "212710176",
"OpenPrice": 1.75617,
"Uic": 23
},
"ClosedPositionUniqueId": "212710176-212725160",
"NetPositionId": "GBPCAD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": -40000,
"AssetType": "FxSpot",
"BuyOrSell": "Sell",
"ClientId": "9226397",
"ClosedProfitLoss": -590.8,
"ClosedProfitLossInBaseCurrency": -388.640056,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212725128",
"ClosingPrice": 1.77094,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -4.03,
"CostClosingInBaseCurrency": -2.65,
"CostOpening": -3.51,
"CostOpeningInBaseCurrency": -2.32,
"ExecutionTimeClose": "2019-03-06T23:02:56.295679Z",
"ExecutionTimeOpen": "2019-03-06T10:24:50.635259Z",
"OpeningPositionId": "212710176",
"OpenPrice": 1.75617,
"Uic": 23
},
"ClosedPositionUniqueId": "212710176-212725128",
"NetPositionId": "GBPCAD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 40000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": 6,
"ClosedProfitLossInBaseCurrency": 5.30466,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212724952",
"ClosingPrice": 1.13076,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -3,
"CostClosingInBaseCurrency": -2.65,
"CostOpening": -2.26,
"CostOpeningInBaseCurrency": -2,
"ExecutionTimeClose": "2019-03-06T22:55:59.228387Z",
"ExecutionTimeOpen": "2019-03-06T10:24:50.460091Z",
"OpeningPositionId": "212710174",
"OpenPrice": 1.13061,
"Uic": 21
},
"ClosedPositionUniqueId": "212710174-212724952",
"NetPositionId": "EURUSD__FxSpot"
}
]
}
},
"_v3_ClosedPositionSubscription": {
"url": "/openapi/port/v1/closedpositions/subscriptions",
"params": {},
"body": {
"Arguments": {
"ClientKey": "Cf4xZWiYL6W1nMKpygBLLA=="
},
"ContextId": "explorer_1551913039211",
"ReferenceId": "D_975"
},
"response": {
"ContextId": "explorer_1551913039211",
"Format": "application/json",
"InactivityTimeout": 30,
"ReferenceId": "D_975",
"RefreshRate": 1000,
"Snapshot": {
"Data": [
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": -40000,
"AssetType": "FxSpot",
"BuyOrSell": "Sell",
"ClientId": "9226397",
"ClosedProfitLoss": -582.8,
"ClosedProfitLossInBaseCurrency": -383.389152,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212725160",
"ClosingPrice": 1.77074,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -4.03,
"CostClosingInBaseCurrency": -2.65,
"CostOpening": -3.51,
"CostOpeningInBaseCurrency": -2.32,
"ExecutionTimeClose": "2019-03-06T23:07:47.040598Z",
"ExecutionTimeOpen": "2019-03-06T10:24:50.635259Z",
"OpeningPositionId": "212710176",
"OpenPrice": 1.75617,
"Uic": 23
},
"ClosedPositionUniqueId": "212710176-212725160",
"NetPositionId": "GBPCAD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": -40000,
"AssetType": "FxSpot",
"BuyOrSell": "Sell",
"ClientId": "9226397",
"ClosedProfitLoss": -590.8,
"ClosedProfitLossInBaseCurrency": -388.651872,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212725128",
"ClosingPrice": 1.77094,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -4.03,
"CostClosingInBaseCurrency": -2.65,
"CostOpening": -3.51,
"CostOpeningInBaseCurrency": -2.32,
"ExecutionTimeClose": "2019-03-06T23:02:56.295679Z",
"ExecutionTimeOpen": "2019-03-06T10:24:50.635259Z",
"OpeningPositionId": "212710176",
"OpenPrice": 1.75617,
"Uic": 23
},
"ClosedPositionUniqueId": "212710176-212725128",
"NetPositionId": "GBPCAD__FxSpot"
},
{
"ClosedPosition": {
"AccountId": "9226397",
"Amount": 40000,
"AssetType": "FxSpot",
"BuyOrSell": "Buy",
"ClientId": "9226397",
"ClosedProfitLoss": 6,
"ClosedProfitLossInBaseCurrency": 5.30445,
"ClosingMarketValue": 0,
"ClosingMarketValueInBaseCurrency": 0,
"ClosingMethod": "Fifo",
"ClosingPositionId": "212724952",
"ClosingPrice": 1.13076,
"ConversionRateInstrumentToBaseSettledClosing": False,
"ConversionRateInstrumentToBaseSettledOpening": True,
"CostClosing": -3,
"CostClosingInBaseCurrency": -2.65,
"CostOpening": -2.26,
"CostOpeningInBaseCurrency": -2,
"ExecutionTimeClose": "2019-03-06T22:55:59.228387Z",
"ExecutionTimeOpen": "2019-03-06T10:24:50.460091Z",
"OpeningPositionId": "212710174",
"OpenPrice": 1.13061,
"Uic": 21
},
"ClosedPositionUniqueId": "212710174-212724952",
"NetPositionId": "EURUSD__FxSpot"
}
],
"MaxRows": 100000
},
"State": "Active"
}
},
"_v3_ClosedPositionSubscriptionUpdate": {
"url": "/openapi/port/v1/closedpositions/subscriptions/"
"{ContextId}/{ReferenceId}/",
"body": {'NewPageSize': 25630},
"response": ''
},
"_v3_ClosedPositionSubscriptionsRemove": {
"url": "/openapi/port/v1/closedpositions/subscriptions/{ContextId}/",
"route": {
'ContextId': 29931122,
},
"params": {'Tag': 2345223},
"response": ''
},
"_v3_ClosedPositionSubscriptionRemoveById": {
"url": "/openapi/port/v1/closedpositions/subscriptions/"
"{ContextId}/{ReferenceId}/",
"route": {
'ContextId': 29931122,
'ReferenceId': '0f8fad5b-d9cb-469f-a165-70867728950e',
},
"response": ''
}
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/portfolio/responses/closedpositions.py
| 0.477554 | 0.343672 |
closedpositions.py
|
pypi
|
responses = {
"_v3_AccountBalancesMe": {
"url": "/port/v1/balances/me",
"response": {
"CalculationReliability": "Ok",
"CashBalance": 999956.74,
"ChangesScheduled": False,
"ClosedPositionsCount": 0,
"CollateralCreditValue": {
"Line": 978723.62,
"UtilzationPct": 0
},
"CostToClosePositions": -37.39,
"Currency": "EUR",
"CurrencyDecimals": 2,
"InitialMargin": {
"MarginAvailable": 978723.61,
"MarginUsedByCurrentPositions": -17662.4,
"MarginUtilizationPct": 1.77,
"NetEquityForMargin": 996386.01
},
"IsPortfolioMarginModelSimple": True,
"MarginAvailableForTrading": 978723.61,
"MarginCollateralNotAvailable": 0,
"MarginExposureCoveragePct": 141.03,
"MarginNetExposure": 706507.11,
"MarginUsedByCurrentPositions": -17662.4,
"MarginUtilizationPct": 1.77,
"NetEquityForMargin": 996386.01,
"NetPositionsCount": 3,
"NonMarginPositionsValue": 0,
"OpenPositionsCount": 3,
"OptionPremiumsMarketValue": 0,
"OrdersCount": 1,
"OtherCollateral": 0,
"TotalValue": 996386.01,
"TransactionsNotBooked": 0,
"UnrealizedMarginClosedProfitLoss": 0,
"UnrealizedMarginOpenProfitLoss": -3533.34,
"UnrealizedMarginProfitLoss": -3533.34,
"UnrealizedPositionsValue": -3570.73
}
},
"_v3_AccountBalances": {
"url": "/port/v1/balances",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"CalculationReliability": "Ok",
"CashBalance": 999956.74,
"ChangesScheduled": False,
"ClosedPositionsCount": 0,
"CollateralCreditValue": {
"Line": 979022.6,
"UtilzationPct": 0
},
"CostToClosePositions": -37.46,
"Currency": "EUR",
"CurrencyDecimals": 2,
"InitialMargin": {
"MarginAvailable": 979022.6,
"MarginUsedByCurrentPositions": -17692.44,
"MarginUtilizationPct": 1.78,
"NetEquityForMargin": 996715.04
},
"IsPortfolioMarginModelSimple": True,
"MarginAvailableForTrading": 979022.6,
"MarginCollateralNotAvailable": 0,
"MarginExposureCoveragePct": 140.84,
"MarginNetExposure": 707697.6,
"MarginUsedByCurrentPositions": -17692.44,
"MarginUtilizationPct": 1.78,
"NetEquityForMargin": 996715.04,
"NetPositionsCount": 3,
"NonMarginPositionsValue": 0,
"OpenPositionsCount": 3,
"OptionPremiumsMarketValue": 0,
"OrdersCount": 1,
"OtherCollateral": 0,
"TotalValue": 996715.04,
"TransactionsNotBooked": 0,
"UnrealizedMarginClosedProfitLoss": 0,
"UnrealizedMarginOpenProfitLoss": -3204.24,
"UnrealizedMarginProfitLoss": -3204.24,
"UnrealizedPositionsValue": -3241.7
}
},
"_v3_MarginOverview": {
"url": "/port/v1/balances/marginoverview",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"Groups": [
{
"Contributors": [
{
"AssetTypes": [
"FxSpot"
],
"InstrumentDescription": "British Pound/Canadian Dollar",
"InstrumentSpecifier": "GBPCAD",
"Margin": 2908,
"Uic": 23
},
{
"AssetTypes": [
"FxSpot"
],
"InstrumentDescription": "British Pound/Australian Dollar",
"InstrumentSpecifier": "GBPAUD",
"Margin": 14540,
"Uic": 22
},
{
"AssetTypes": [
"FxSpot"
],
"InstrumentDescription": "British Pound/US Dollar",
"InstrumentSpecifier": "GBPUSD",
"Margin": 291,
"Uic": 31
}
],
"GroupType": "FX",
"TotalMargin": 17739
}
]
}
},
"_v3_BalanceSubscriptionCreate": {
"url": "/port/v1/balances/subscriptions",
"body": {
"Arguments": {
"ClientKey": "Cf4xZWiYL6W1nMKpygBLLA=="
},
"ContextId": "explorer_1551792578055",
"ReferenceId": "U_452"
},
"response": {
"ContextId": "explorer_1551792578055",
"Format": "application/json",
"InactivityTimeout": 30,
"ReferenceId": "U_452",
"RefreshRate": 1000,
"Snapshot": {
"CalculationReliability": "Ok",
"CashBalance": 999956.74,
"ChangesScheduled": False,
"ClosedPositionsCount": 0,
"CollateralCreditValue": {
"Line": 979847,
"UtilzationPct": 0
},
"CostToClosePositions": -37.54,
"Currency": "EUR",
"CurrencyDecimals": 2,
"InitialMargin": {
"MarginAvailable": 979847,
"MarginUsedByCurrentPositions": -17733.77,
"MarginUtilizationPct": 1.78,
"NetEquityForMargin": 997580.77
},
"IsPortfolioMarginModelSimple": True,
"MarginAvailableForTrading": 979847,
"MarginCollateralNotAvailable": 0,
"MarginExposureCoveragePct": 140.63,
"MarginNetExposure": 709350.7,
"MarginUsedByCurrentPositions": -17733.77,
"MarginUtilizationPct": 1.78,
"NetEquityForMargin": 997580.77,
"NetPositionsCount": 3,
"NonMarginPositionsValue": 0,
"OpenPositionsCount": 3,
"OptionPremiumsMarketValue": 0,
"OrdersCount": 1,
"OtherCollateral": 0,
"TotalValue": 997580.77,
"TransactionsNotBooked": 0,
"UnrealizedMarginClosedProfitLoss": 0,
"UnrealizedMarginOpenProfitLoss": -2338.43,
"UnrealizedMarginProfitLoss": -2338.43,
"UnrealizedPositionsValue": -2375.97
},
"State": "Active"
}
},
"_v3_BalanceSubscriptionRemoveByTag": {
"url": "/port/v1/balances/subscriptions/{ContextId}",
"params": {'Tag': 'PAGE1'},
"response": ''
},
"_v3_BalanceSubscriptionRemoveById": {
"url": "/port/v1/balances/subscriptions/{ContextId}/ReferenceId}",
"response": ''
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/portfolio/responses/balances.py
| 0.522933 | 0.410343 |
balances.py
|
pypi
|
responses = {
"_v3_SinglePosition": {
"url": "/openapi/port/v1/positions/{PositionId}",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": -100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "46dc6b2a-5b6f-43c8-b747-6b530da9110e",
"ExecutionTimeOpen": "2019-03-04T00:10:23.040641Z",
"IsMarketOpen": True,
"OpenPrice": 1.13715,
"RelatedOpenOrders": [],
"SourceOrderId": "76271915",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212561926",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.88199,
"ConversionRateOpen": 0.88199,
"CurrentPrice": 1.1339,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Ask",
"Exposure": -100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": -100000,
"InstrumentPriceDayPercentChange": -0.24,
"ProfitLossOnTrade": 325,
"ProfitLossOnTradeInBaseCurrency": 286.65,
"TradeCostsTotal": -11.36,
"TradeCostsTotalInBaseCurrency": -10.02
}
}
},
"_v3_SinglePositionDetails": {
"url": "/openapi/port/v1/positions/{PositionId}/details",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA==',
'AccountKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"DisplayAndFormat": {
"Currency": "USD",
"Decimals": 4,
"Description": "Euro/US Dollar",
"Format": "AllowDecimalPips",
"Symbol": "EURUSD"
},
"Exchange": {
"Description": "Inter Bank",
"ExchangeId": "SBFX",
"IsOpen": True
},
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": -100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "46dc6b2a-5b6f-43c8-b747-6b530da9110e",
"ExecutionTimeOpen": "2019-03-04T00:10:23.040641Z",
"IsMarketOpen": True,
"OpenPrice": 1.13715,
"RelatedOpenOrders": [],
"SourceOrderId": "76271915",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionDetails": {
"CloseCost": {
"Commission": 5.67
},
"CloseCostInBaseCurrency": {
"Commission": 5
},
"CorrelationKey": "46dc6b2a-5b6f-43c8-b747-6b530da9110e",
"LockedByBackOffice": False,
"MarketValue": 351,
"OpenCost": {
"Commission": 5.69
},
"OpenCostInBaseCurrency": {
"Commission": 5.02
},
"SourceOrderId": "76271915"
},
"PositionId": "212561926",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.882195,
"ConversionRateOpen": 0.882195,
"CurrentPrice": 1.13364,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Ask",
"Exposure": -100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": -100000,
"InsTrumentPriceDayPercentChange": -0.26,
"ProfitLossOnTrade": 351,
"ProfitLossOnTradeInBaseCurrency": 309.65,
"TradeCostsTotal": -11.36,
"TradeCostsTotalInBaseCurrency": -10.02
}
}
},
"_v3_PositionsMe": {
"url": "/openapi/port/v1/positions/me",
"params": {},
"response": {
"__count": 4,
"Data": [
{
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": -100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "46dc6b2a-5b6f-43c8-b747-6b530da9110e",
"ExecutionTimeOpen": "2019-03-04T00:10:23.040641Z",
"IsMarketOpen": True,
"OpenPrice": 1.13715,
"RelatedOpenOrders": [],
"SourceOrderId": "76271915",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212561926",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.882595,
"ConversionRateOpen": 0.882595,
"CurrentPrice": 1.13312,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Ask",
"Exposure": -100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": -100000,
"InstrumentPriceDayPercentChange": -0.31,
"ProfitLossOnTrade": 403,
"ProfitLossOnTradeInBaseCurrency": 355.69,
"TradeCostsTotal": -11.36,
"TradeCostsTotalInBaseCurrency": -10.03
}
},
{
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "50fae087-b7d4-49ab-afa2-5145cd56a7c5",
"ExecutionTimeOpen": "2019-03-04T00:04:11.340151Z",
"IsMarketOpen": True,
"OpenPrice": 1.1371,
"RelatedOpenOrders": [],
"SourceOrderId": "76271912",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212561892",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.882595,
"ConversionRateOpen": 0.882595,
"CurrentPrice": 1.13292,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": 100000,
"InstrumentPriceDayPercentChange": -0.31,
"ProfitLossOnTrade": -418,
"ProfitLossOnTradeInBaseCurrency": -368.92,
"TradeCostsTotal": -11.35,
"TradeCostsTotalInBaseCurrency": -10.02
}
},
{
"NetPositionId": "GBPAUD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 500000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "206cceed-2240-43f8-8c46-840e8b722549",
"ExecutionTimeOpen": "2019-03-03T23:35:08.243690Z",
"IsMarketOpen": True,
"OpenPrice": 1.86391,
"RelatedOpenOrders": [],
"SourceOrderId": "76271862",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 22,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212550212",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.6254,
"ConversionRateOpen": 0.6254,
"CurrentPrice": 1.85999,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 500000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 581757.5,
"InstrumentPriceDayPercentChange": -0.25,
"ProfitLossOnTrade": -1960,
"ProfitLossOnTradeInBaseCurrency": -1225.78,
"TradeCostsTotal": -93.1,
"TradeCostsTotalInBaseCurrency": -58.22
}
},
{
"NetPositionId": "GBPCAD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "19c44107-6858-4191-805c-764a69d27491",
"ExecutionTimeOpen": "2019-03-03T23:34:51.823660Z",
"IsMarketOpen": True,
"OpenPrice": 1.75824,
"RelatedOpenOrders": [],
"SourceOrderId": "76271861",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212550210",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.663595,
"ConversionRateOpen": 0.663595,
"CurrentPrice": 1.75294,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116351.5,
"InstrumentPriceDayPercentChange": -0.18,
"ProfitLossOnTrade": -530,
"ProfitLossOnTradeInBaseCurrency": -351.71,
"TradeCostsTotal": -17.55,
"TradeCostsTotalInBaseCurrency": -11.65
}
}
]
}
},
"_v3_PositionsQuery": {
"url": "/openapi/port/v1/positions/",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"__count": 4,
"Data": [
{
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": -100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "46dc6b2a-5b6f-43c8-b747-6b530da9110e",
"ExecutionTimeOpen": "2019-03-04T00:10:23.040641Z",
"IsMarketOpen": True,
"OpenPrice": 1.13715,
"RelatedOpenOrders": [],
"SourceOrderId": "76271915",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212561926",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.882905,
"ConversionRateOpen": 0.882905,
"CurrentPrice": 1.13273,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Ask",
"Exposure": -100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": -100000,
"InstrumentPriceDayPercentChange": -0.34,
"ProfitLossOnTrade": 442,
"ProfitLossOnTradeInBaseCurrency": 390.24,
"TradeCostsTotal": -11.35,
"TradeCostsTotalInBaseCurrency": -10.02
}
},
{
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "50fae087-b7d4-49ab-afa2-5145cd56a7c5",
"ExecutionTimeOpen": "2019-03-04T00:04:11.340151Z",
"IsMarketOpen": True,
"OpenPrice": 1.1371,
"RelatedOpenOrders": [],
"SourceOrderId": "76271912",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212561892",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.882905,
"ConversionRateOpen": 0.882905,
"CurrentPrice": 1.13253,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": 100000,
"InstrumentPriceDayPercentChange": -0.34,
"ProfitLossOnTrade": -457,
"ProfitLossOnTradeInBaseCurrency": -403.49,
"TradeCostsTotal": -11.35,
"TradeCostsTotalInBaseCurrency": -10.02
}
},
{
"NetPositionId": "GBPAUD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 500000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "206cceed-2240-43f8-8c46-840e8b722549",
"ExecutionTimeOpen": "2019-03-03T23:35:08.243690Z",
"IsMarketOpen": True,
"OpenPrice": 1.86391,
"RelatedOpenOrders": [],
"SourceOrderId": "76271862",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 22,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212550212",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.62534,
"ConversionRateOpen": 0.62534,
"CurrentPrice": 1.86127,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 500000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 582115,
"InstrumentPriceDayPercentChange": -0.19,
"ProfitLossOnTrade": -1320,
"ProfitLossOnTradeInBaseCurrency": -825.45,
"TradeCostsTotal": -93.13,
"TradeCostsTotalInBaseCurrency": -58.24
}
},
{
"NetPositionId": "GBPCAD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "19c44107-6858-4191-805c-764a69d27491",
"ExecutionTimeOpen": "2019-03-03T23:34:51.823660Z",
"IsMarketOpen": True,
"OpenPrice": 1.75824,
"RelatedOpenOrders": [],
"SourceOrderId": "76271861",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212550210",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.66389,
"ConversionRateOpen": 0.66389,
"CurrentPrice": 1.75321,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116423,
"InstrumentPriceDayPercentChange": -0.17,
"ProfitLossOnTrade": -503,
"ProfitLossOnTradeInBaseCurrency": -333.94,
"TradeCostsTotal": -17.56,
"TradeCostsTotalInBaseCurrency": -11.66
}
}
]
}
},
"_v3_PositionListSubscription": {
"url": "/openapi/port/v1/positions/subscriptions",
"params": {},
"body": {
"Arguments": {
"ClientKey": "Cf4xZWiYL6W1nMKpygBLLA=="
},
"ContextId": "explorer_1551702571343",
"ReferenceId": "C_702"
},
"response": {
"ContextId": "explorer_1551702571343",
"Format": "application/json",
"InactivityTimeout": 30,
"ReferenceId": "C_702",
"RefreshRate": 1000,
"Snapshot": {
"Data": [
{
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": -100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "46dc6b2a-5b6f-43c8-b747-6b530da9110e",
"ExecutionTimeOpen": "2019-03-04T00:10:23.040641Z",
"IsMarketOpen": True,
"OpenPrice": 1.13715,
"RelatedOpenOrders": [],
"SourceOrderId": "76271915",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212561926",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.883135,
"ConversionRateOpen": 0.883135,
"CurrentPrice": 1.13243,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Ask",
"Exposure": -100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": -100000,
"InstrumentPriceDayPercentChange": -0.37,
"ProfitLossOnTrade": 472,
"ProfitLossOnTradeInBaseCurrency": 416.84,
"TradeCostsTotal": -11.35,
"TradeCostsTotalInBaseCurrency": -10.02
}
},
{
"NetPositionId": "EURUSD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "50fae087-b7d4-49ab-afa2-5145cd56a7c5",
"ExecutionTimeOpen": "2019-03-04T00:04:11.340151Z",
"IsMarketOpen": True,
"OpenPrice": 1.1371,
"RelatedOpenOrders": [],
"SourceOrderId": "76271912",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212561892",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.883135,
"ConversionRateOpen": 0.883135,
"CurrentPrice": 1.13223,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": 100000,
"InstrumentPriceDayPercentChange": -0.37,
"ProfitLossOnTrade": -487,
"ProfitLossOnTradeInBaseCurrency": -430.09,
"TradeCostsTotal": -11.35,
"TradeCostsTotalInBaseCurrency": -10.02
}
},
{
"NetPositionId": "GBPAUD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 500000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "206cceed-2240-43f8-8c46-840e8b722549",
"ExecutionTimeOpen": "2019-03-03T23:35:08.243690Z",
"IsMarketOpen": True,
"OpenPrice": 1.86391,
"RelatedOpenOrders": [],
"SourceOrderId": "76271862",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 22,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212550212",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.625415,
"ConversionRateOpen": 0.625415,
"CurrentPrice": 1.86215,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 500000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 582455,
"InstrumentPriceDayPercentChange": -0.14,
"ProfitLossOnTrade": -880,
"ProfitLossOnTradeInBaseCurrency": -550.37,
"TradeCostsTotal": -93.15,
"TradeCostsTotalInBaseCurrency": -58.26
}
},
{
"NetPositionId": "GBPCAD__FxSpot",
"PositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"CloseConversionRateSettled": False,
"CorrelationKey": "19c44107-6858-4191-805c-764a69d27491",
"ExecutionTimeOpen": "2019-03-03T23:34:51.823660Z",
"IsMarketOpen": True,
"OpenPrice": 1.75824,
"RelatedOpenOrders": [],
"SourceOrderId": "76271861",
"SpotDate": "2019-03-06",
"Status": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"PositionId": "212550210",
"PositionView": {
"CalculationReliability": "Ok",
"ConversionRateCurrent": 0.66362,
"ConversionRateOpen": 0.66362,
"CurrentPrice": 1.75496,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116491,
"InstrumentPriceDayPercentChange": -0.07,
"ProfitLossOnTrade": -328,
"ProfitLossOnTradeInBaseCurrency": -217.67,
"TradeCostsTotal": -17.56,
"TradeCostsTotalInBaseCurrency": -11.65
}
}
],
"MaxRows": 100000
},
"State": "Active"
}
},
"_v3_PositionSubscriptionPageSize": {
"url": "/openapi/port/v1/positions/subscriptions/"
"{ContextId}/{ReferenceId}",
"body": {
"NewPageSize": 25630
},
"response": ''
},
"_v3_PositionSubscriptionRemoveMultiple": {
"url": "/openapi/port/v1/positions/subscriptions/{ContextId}",
"params": {
"Tag": "..."
},
"response": ''
},
"_v3_PositionSubscriptionRemove": {
"url": "/openapi/port/v1/positions/subscriptions/"
"{ContextId}/{ReferenceId}",
"response": ''
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/portfolio/responses/positions.py
| 0.423339 | 0.378229 |
positions.py
|
pypi
|
responses = {
"_v3_SingleNetPosition": {
"url": "/openapi/port/v1/netpositions/{NetPositionId}",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"NetPositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550210",
"SinglePositionStatus": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionDetails": {
"CloseCost": {
"Commission": 8.77
},
"CloseCostInBaseCurrency": {
"Commission": 5.81
},
"MarketValue": -508,
"MarketValueInBaseCurrency": -336.7,
"OpenCost": {
"Commission": 8.79
},
"OpenCostInBaseCurrency": {
"Commission": 5.83
}
},
"NetPositionId": "GBPCAD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.75824,
"CalculationReliability": "Ok",
"CurrentPrice": 1.75316,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116226,
"InstrumentPriceDayPercentChange": -0.17,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -508,
"ProfitLossOnTradeInBaseCurrency": -336.7,
"Status": "Open",
"TradeCostsTotal": -17.56,
"TradeCostsTotalInBaseCurrency": -11.64
}
}
},
"_v3_SingleNetPositionDetails": {
"url": "/openapi/port/v1/netpositions/{NetPositionId}/details",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"NetPositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550210",
"SinglePositionStatus": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionDetails": {
"CloseCost": {
"Commission": 8.77
},
"CloseCostInBaseCurrency": {
"Commission": 5.81
},
"MarketValue": -478,
"MarketValueInBaseCurrency": -316.77,
"OpenCost": {
"Commission": 8.79
},
"OpenCostInBaseCurrency": {
"Commission": 5.83
}
},
"NetPositionId": "GBPCAD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.75824,
"CalculationReliability": "Ok",
"CurrentPrice": 1.75346,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116228,
"InstrumentPriceDayPercentChange": -0.15,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -478,
"ProfitLossOnTradeInBaseCurrency": -316.77,
"Status": "Open",
"TradeCostsTotal": -17.56,
"TradeCostsTotalInBaseCurrency": -11.64
}
}
},
"_v3_NetPositionsMe": {
"url": "/openapi/port/v1/netpositions/me",
"params": {},
"response": {
"__count": 4,
"Data": [
{
"NetPositionBase": {
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550210",
"SinglePositionStatus": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPCAD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.75824,
"CalculationReliability": "Ok",
"CurrentPrice": 1.75322,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116201.5,
"InstrumentPriceDayPercentChange": -0.17,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -502,
"ProfitLossOnTradeInBaseCurrency": -332.64,
"Status": "Open",
"TradeCostsTotal": -17.56,
"TradeCostsTotalInBaseCurrency": -11.64
}
},
{
"NetPositionBase": {
"Amount": 500000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550212",
"SinglePositionStatus": "Open",
"Uic": 22,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPAUD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.86391,
"CalculationReliability": "Ok",
"CurrentPrice": 1.85813,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 500000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 581007.5,
"InstrumentPriceDayPercentChange": -0.35,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -2890,
"ProfitLossOnTradeInBaseCurrency": -1806.86,
"Status": "Open",
"TradeCostsTotal": -93.05,
"TradeCostsTotalInBaseCurrency": -58.18
}
},
{
"NetPositionBase": {
"Amount": 0,
"AssetType": "FxSpot",
"CanBeClosed": False,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "EURUSD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 0,
"CalculationReliability": "Ok",
"CurrentPrice": 1.13343,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 0,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": 0,
"InstrumentPriceDayPercentChange": -0.26,
"PositionCount": 2,
"PositionsNotClosedCount": 2,
"ProfitLossOnTrade": 5,
"ProfitLossOnTradeInBaseCurrency": 4.41,
"Status": "Open",
"TradeCostsTotal": -11.38,
"TradeCostsTotalInBaseCurrency": -10.04
}
},
{
"NetPositionBase": {
"Amount": 10000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212675868",
"SinglePositionStatus": "Open",
"Uic": 31,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPUSD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.31849,
"CalculationReliability": "Ok",
"CurrentPrice": 1.31701,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 10000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 11620.15,
"InstrumentPriceDayPercentChange": -0.27,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -14.8,
"ProfitLossOnTradeInBaseCurrency": -13.06,
"Status": "Open",
"TradeCostsTotal": -6,
"TradeCostsTotalInBaseCurrency": -5.29
}
}
]
}
},
"_v3_NetPositionsQuery": {
"url": "/openapi/port/v1/netpositions/",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": {
"__count": 4,
"Data": [
{
"NetPositionBase": {
"AccountId": "9226397",
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550210",
"SinglePositionStatus": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPCAD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.75824,
"CalculationReliability": "Ok",
"CurrentPrice": 1.75287,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116180.5,
"InstrumentPriceDayPercentChange": -0.19,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -537,
"ProfitLossOnTradeInBaseCurrency": -355.85,
"Status": "Open",
"TradeCostsTotal": -17.55,
"TradeCostsTotalInBaseCurrency": -11.63
}
},
{
"NetPositionBase": {
"AccountId": "9226397",
"Amount": 500000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550212",
"SinglePositionStatus": "Open",
"Uic": 22,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPAUD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.86391,
"CalculationReliability": "Ok",
"CurrentPrice": 1.8575,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 500000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 580902.5,
"InstrumentPriceDayPercentChange": -0.39,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -3205,
"ProfitLossOnTradeInBaseCurrency": -2004.09,
"Status": "Open",
"TradeCostsTotal": -93.04,
"TradeCostsTotalInBaseCurrency": -58.18
}
},
{
"NetPositionBase": {
"AccountId": "9226397",
"Amount": 0,
"AssetType": "FxSpot",
"CanBeClosed": False,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "EURUSD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 0,
"CalculationReliability": "Ok",
"CurrentPrice": 1.13377,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 0,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": 0,
"InstrumentPriceDayPercentChange": -0.23,
"PositionCount": 2,
"PositionsNotClosedCount": 2,
"ProfitLossOnTrade": 5,
"ProfitLossOnTradeInBaseCurrency": 4.41,
"Status": "Open",
"TradeCostsTotal": -11.38,
"TradeCostsTotalInBaseCurrency": -10.04
}
},
{
"NetPositionBase": {
"AccountId": "9226397",
"Amount": 10000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212675868",
"SinglePositionStatus": "Open",
"Uic": 31,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPUSD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.31849,
"CalculationReliability": "Ok",
"CurrentPrice": 1.31718,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 10000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 11618.05,
"InstrumentPriceDayPercentChange": -0.26,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -13.1,
"ProfitLossOnTradeInBaseCurrency": -11.55,
"Status": "Open",
"TradeCostsTotal": -6,
"TradeCostsTotalInBaseCurrency": -5.29
}
}
]
}
},
"_v3_NetPositionListSubscription": {
"url": "/openapi/port/v1/netpositions/subscriptions",
"body": {
"Arguments": {
"ClientKey": "Cf4xZWiYL6W1nMKpygBLLA=="
},
"ContextId": "explorer_1551702571343",
"ReferenceId": "F_20"
},
"response": {
"ContextId": "explorer_1551702571343",
"Format": "application/json",
"InactivityTimeout": 30,
"ReferenceId": "F_20",
"RefreshRate": 1000,
"Snapshot": {
"Data": [
{
"NetPositionBase": {
"Amount": 100000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550210",
"SinglePositionStatus": "Open",
"Uic": 23,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPCAD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.75824,
"CalculationReliability": "Ok",
"CurrentPrice": 1.75313,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 100000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 116179,
"InstrumentPriceDayPercentChange": -0.17,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -511,
"ProfitLossOnTradeInBaseCurrency": -338.57,
"Status": "Open",
"TradeCostsTotal": -17.56,
"TradeCostsTotalInBaseCurrency": -11.63
}
},
{
"NetPositionBase": {
"Amount": 500000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212550212",
"SinglePositionStatus": "Open",
"Uic": 22,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPAUD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.86391,
"CalculationReliability": "Ok",
"CurrentPrice": 1.85805,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 500000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 580895,
"InstrumentPriceDayPercentChange": -0.36,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -2930,
"ProfitLossOnTradeInBaseCurrency": -1831.62,
"Status": "Open",
"TradeCostsTotal": -93.05,
"TradeCostsTotalInBaseCurrency": -58.17
}
},
{
"NetPositionBase": {
"Amount": 0,
"AssetType": "FxSpot",
"CanBeClosed": False,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"Uic": 21,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "EURUSD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 0,
"CalculationReliability": "Ok",
"CurrentPrice": 1.13416,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 0,
"ExposureCurrency": "EUR",
"ExposureInBaseCurrency": 0,
"InstrumentPriceDayPercentChange": -0.2,
"PositionCount": 2,
"PositionsNotClosedCount": 2,
"ProfitLossOnTrade": 5,
"ProfitLossOnTradeInBaseCurrency": 4.41,
"Status": "Open",
"TradeCostsTotal": -11.38,
"TradeCostsTotalInBaseCurrency": -10.03
}
},
{
"NetPositionBase": {
"Amount": 10000,
"AssetType": "FxSpot",
"CanBeClosed": True,
"ClientId": "9226397",
"IsMarketOpen": True,
"NumberOfRelatedOrders": 0,
"PositionsAccount": "9226397",
"SinglePositionId": "212675868",
"SinglePositionStatus": "Open",
"Uic": 31,
"ValueDate": "2019-03-06T00:00:00.000000Z"
},
"NetPositionId": "GBPUSD__FxSpot",
"NetPositionView": {
"AverageOpenPrice": 1.31849,
"CalculationReliability": "Ok",
"CurrentPrice": 1.3176,
"CurrentPriceDelayMinutes": 0,
"CurrentPriceType": "Bid",
"Exposure": 10000,
"ExposureCurrency": "GBP",
"ExposureInBaseCurrency": 11617.9,
"InstrumentPriceDayPercentChange": -0.23,
"PositionCount": 1,
"PositionsNotClosedCount": 1,
"ProfitLossOnTrade": -8.9,
"ProfitLossOnTradeInBaseCurrency": -7.85,
"Status": "Open",
"TradeCostsTotal": -6,
"TradeCostsTotalInBaseCurrency": -5.29
}
}
]
},
"State": "Active"
}
},
"_v3_NetPositionSubscriptionRemoveMultiple": {
"url": "/openapi/port/v1/netpositions/subscriptions/{ContextId}",
"params": {
"Tag": "..."
},
"response": ''
},
"_v3_NetPositionSubscriptionRemove": {
"url": "/openapi/port/v1/netpositions/subscriptions/"
"{ContextId}/{ReferenceId}",
"response": ''
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/portfolio/responses/netpositions.py
| 0.515864 | 0.4081 |
netpositions.py
|
pypi
|
responses = {
"_v3_NetInstrumentsExposureMe": {
"url": "/openapi/port/v1/exposure/instruments/me",
"response": [
{
"Amount": 60000,
"AssetType": "FxSpot",
"AverageOpenPrice": 1.13071,
"CalculationReliability": "Ok",
"CanBeClosed": True,
"DisplayAndFormat": {
"Currency": "USD",
"Decimals": 4,
"Description": "Euro/US Dollar",
"Format": "AllowDecimalPips",
"Symbol": "EURUSD"
},
"InstrumentPriceDayPercentChange": 0.42,
"NetPositionId": "EURUSD__FxSpot",
"ProfitLossOnTrade": -408.6,
"Uic": 21
},
{
"Amount": -50000,
"AssetType": "FxSpot",
"AverageOpenPrice": 8.6839,
"CalculationReliability": "Ok",
"CanBeClosed": True,
"DisplayAndFormat": {
"Currency": "DKK",
"Decimals": 4,
"Description": "British Pound/Danish Krone",
"Format": "Normal",
"Symbol": "GBPDKK"
},
"InstrumentPriceDayPercentChange": -1,
"NetPositionId": "GBPDKK__FxSpot",
"ProfitLossOnTrade": 2530,
"Uic": 25
}
]
},
"_v3_NetInstrumentExposureSpecific": {
"url": "/openapi/port/v1/exposure/instruments",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": [
{
"Amount": 60000,
"AssetType": "FxSpot",
"AverageOpenPrice": 1.13071,
"CalculationReliability": "Ok",
"CanBeClosed": True,
"DisplayAndFormat": {
"Currency": "USD",
"Decimals": 4,
"Description": "Euro/US Dollar",
"Format": "AllowDecimalPips",
"Symbol": "EURUSD"
},
"InstrumentPriceDayPercentChange": 0.42,
"NetPositionId": "EURUSD__FxSpot",
"ProfitLossOnTrade": -405,
"Uic": 21
},
{
"Amount": -50000,
"AssetType": "FxSpot",
"AverageOpenPrice": 8.6839,
"CalculationReliability": "Ok",
"CanBeClosed": True,
"DisplayAndFormat": {
"Currency": "DKK",
"Decimals": 4,
"Description": "British Pound/Danish Krone",
"Format": "Normal",
"Symbol": "GBPDKK"
},
"InstrumentPriceDayPercentChange": -1.02,
"NetPositionId": "GBPDKK__FxSpot",
"ProfitLossOnTrade": 2600,
"Uic": 25
}
]
},
"_v3_CreateExposureSubscription": {
"url": "/openapi/port/v1/exposure/instruments/subscriptions",
"body": {
"Arguments": {
"ClientKey": "Cf4xZWiYL6W1nMKpygBLLA=="
},
"ContextId": "explorer_1552035128308",
"ReferenceId": "Z_807"
},
"response": {
"ContextId": "explorer_1552035128308",
"Format": "application/json",
"InactivityTimeout": 30,
"ReferenceId": "Z_807",
"RefreshRate": 1000,
"Snapshot": {
"Data": [
{
"Amount": 60000,
"AssetType": "FxSpot",
"AverageOpenPrice": 1.13071,
"CalculationReliability": "Ok",
"CanBeClosed": True,
"DisplayAndFormat": {
"Currency": "USD",
"Decimals": 4,
"Description": "Euro/US Dollar",
"Format": "AllowDecimalPips",
"Symbol": "EURUSD"
},
"InstrumentPriceDayPercentChange": 0.44,
"NetPositionId": "EURUSD__FxSpot",
"ProfitLossOnTrade": -396.6,
"Uic": 21
},
{
"Amount": -50000,
"AssetType": "FxSpot",
"AverageOpenPrice": 8.6839,
"CalculationReliability": "Ok",
"CanBeClosed": True,
"DisplayAndFormat": {
"Currency": "DKK",
"Decimals": 4,
"Description": "British Pound/Danish Krone",
"Format": "Normal",
"Symbol": "GBPDKK"
},
"InstrumentPriceDayPercentChange": -0.98,
"NetPositionId": "GBPDKK__FxSpot",
"ProfitLossOnTrade": 2420,
"Uic": 25
}
]
},
"State": "Active"
}
},
"_v3_RemoveExposureSubscriptionsByTag": {
"url": "/openapi/port/v1/exposure/instruments/"
"subscriptions/{ContextId}",
"params": {},
"response": ''
},
"_v3_RemoveExposureSubscription": {
"url": "/openapi/port/v1/exposure/instruments/"
"subscriptions/{ContextId}/{ReferenceId}",
"params": {},
"response": ''
},
"_v3_CurrencyExposureMe": {
"url": "/openapi/port/v1/exposure/currency/me",
"response": [
{
"Amount": 1057573.99,
"Currency": "EUR"
},
{
"Amount": -67842.6,
"Currency": "USD"
},
{
"Amount": -50000,
"Currency": "GBP"
},
{
"Amount": 434195,
"Currency": "DKK"
}
]
},
"_v3_CurrencyExposureSpecific": {
"url": "/openapi/port/v1/exposure/currency/me",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": [
{
"Amount": 1057573.99,
"Currency": "EUR"
},
{
"Amount": -67842.6,
"Currency": "USD"
},
{
"Amount": -50000,
"Currency": "GBP"
},
{
"Amount": 434195,
"Currency": "DKK"
}
]
},
"_v3_FxSpotExposureMe": {
"url": "/openapi/port/v1/exposure/fxspot/me",
"response": [
{
"Amount": 431950,
"AmountInCalculationEntityCurrency": 57878,
"Currency": "DKK"
},
{
"Amount": 60000,
"AmountInCalculationEntityCurrency": 60000,
"Currency": "EUR"
},
{
"Amount": -50000,
"AmountInCalculationEntityCurrency": -57878,
"Currency": "GBP"
},
{
"Amount": -67402.2,
"AmountInCalculationEntityCurrency": -60000,
"Currency": "USD"
}
]
},
"_v3_FxSpotExposureSpecific": {
"url": "/openapi/port/v1/exposure/fxspot/me",
"params": {'ClientKey': 'Cf4xZWiYL6W1nMKpygBLLA=='},
"response": [
{
"Amount": 432350,
"AmountInCalculationEntityCurrency": 57929,
"Currency": "DKK"
},
{
"Amount": 60000,
"AmountInCalculationEntityCurrency": 60000,
"Currency": "EUR"
},
{
"Amount": -50000,
"AmountInCalculationEntityCurrency": -57929,
"Currency": "GBP"
},
{
"Amount": -67398,
"AmountInCalculationEntityCurrency": -60000,
"Currency": "USD"
}
]
},
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/endpoints/portfolio/responses/exposure.py
| 0.544559 | 0.400749 |
exposure.py
|
pypi
|
"""AccountHistory Definitions."""
definitions = {
"InlineCountValue": {
"AllPages": "The results will contain a total count of items in the "
"queried dataset.",
"None": "The results will not contain an inline count.",
},
"AccountPerformanceStandardPeriod": {
"AllTime": "All time account performance.",
"Month": "The month standard account performance.",
"Quarter": "The quarter standard account performance.",
"Year": "The year standard account performance.",
},
"AssetType": {
"Name": "Description",
"Bond": "Bond.",
"Cash": "Cash. Not tradeable!",
"CfdIndexOption": "Cfd Index Option.",
"CfdOnFutures": "Cfd on Futures.",
"CfdOnIndex": "Cfd on Stock Index.",
"CfdOnStock": "Cfd on Stock.",
"ContractFutures": "Contract Futures.",
"FuturesOption": "Futures Option.",
"FuturesStrategy": "Futures Strategy.",
"FxBinaryOption": "Forex Binary Option.",
"FxForwards": "Forex Forward.",
"FxKnockInOption": "Forex Knock In Option.",
"FxKnockOutOption": "Forex Knock Out Option.",
"FxNoTouchOption": "Forex No Touch Option.",
"FxOneTouchOption": "Forex One Touch Option.",
"FxSpot": "Forex Spot.",
"FxVanillaOption": "Forex Vanilla Option.",
"ManagedFund": "Obsolete: Managed Fund.",
"MutualFund": "Mutual Fund.",
"Stock": "Stock.",
"StockIndex": "Stock Index.",
"StockIndexOption": "Stock Index Option.",
"StockOption": "Stock Option.",
},
"AccountPerformanceFieldGroup": {
"AccountSummary": "",
"All": "",
"Allocation": "",
"AvailableBenchmarks": "",
"BalancePerformance": "",
"BalancePerformance_AccountValueTimeSeries": "",
"BenchMark": "",
"BenchmarkPerformance": "",
"TimeWeightedPerformance": "",
"TimeWeightedPerformance_AccumulatedTimeWeightedTimeSeries": "",
"TotalCashBalancePerCurrency": "",
"TotalPositionsValuePerCurrency": "",
"TotalPositionsValuePerProductPerSecurity": "",
"TradeActivity": "",
}
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/definitions/accounthistory.py
| 0.591133 | 0.494568 |
accounthistory.py
|
pypi
|
definitions = {
'AmountType': {
'CashAmount': 'Order amount is specified as a monetary value',
'Quantity': 'Order Amount is specified as an amount of '
'lots/shares/contracts'
},
'AssetType': {
'Bond': 'Bond',
'CfdIndexOption': 'Cfd Index Option',
'CfdOnFutures': 'Cfd on Futures',
'CfdOnIndex': 'Cfd on Stock Index',
'CfdOnStock': 'Cfd on Stock',
'ContractFutures': 'Contract Futures',
'FuturesOption': 'Futures Option',
'FuturesStrategy': 'Futures Strategy',
'FxBinaryOption': 'Forex Binary Option',
'FxForwards': 'Forex Forward',
'FxKnockInOption': 'Forex Knock In Option',
'FxKnockOutOption': 'Forex Knock Out Option',
'FxNoTouchOption': 'Forex No Touch Option',
'FxOneTouchOption': 'Forex One Touch Option',
'FxSpot': 'Forex Spot',
'FxVanillaOption': 'Forex Vanilla Option',
'ManagedFund': 'Obsolete: Managed Fund',
'MutualFund': 'Mutual Fund',
'Stock': 'Stock',
'StockIndex': 'Stock Index',
'StockIndexOption': 'Stock Index Option',
'StockOption': 'Stock Option',
},
'Direction': {
'Buy': 'Buy',
'Sell': 'Sell'
},
'OrderDurationType': {
'AtTheClose': 'At the close of the trading session',
'AtTheOpening': 'At the Opening of the trading session',
'DayOrder': 'Day order - Valid for the trading session',
'FillOrKill': 'Fill or Kill order',
'GoodForPeriod': 'Good for Period',
'GoodTillCancel': 'Good til Cancel',
'GoodTillDate': 'Good til Date',
'ImmediateOrCancel': 'Immediate or Cancel'
},
'OrderType': {
'Algorithmic': 'Algo order',
'Limit': 'Limit Order',
'Market': 'Market Order',
'Stop': 'Stop Order',
'StopIfTraded': 'Stop if traded',
'StopLimit': 'Stop Limit Order',
'Switch': 'Switch order, Sell X and Buy Y with one order',
'TrailingStop': 'Trailing stop',
'TrailingStopIfBid': 'Trailing stop if bid',
'TrailingStopIfOffered': 'Trailing stop if offered',
'TrailingStopIfTraded': 'Trailing stop if traded',
'Traspaso': 'Traspaso. Specific type of switch order. Only '
'available on select MutualFunds',
'TraspasoIn': 'TraspasoIn. Specific type of switch order'
},
'ToOpenClose': {
'ToClose': 'Order/Position is ToClose',
'ToOpen': 'Order/Position is ToOpen',
'Undefined': 'Undefined'
}
}
|
/saxo_openapi-0.6.0.tar.gz/saxo_openapi-0.6.0/saxo_openapi/definitions/orders.py
| 0.590071 | 0.283041 |
orders.py
|
pypi
|
import saxs_single_bead.form_factors
import numpy as np
def scattering_curve(
residue_codes, residue_locations, minimal_q=0.0, maximal_q=0.5, points=20
):
"""
Computes scattering curve from `residue_codes` and `residue_locations` `N` by `3` array.
Parameters
----------
residue_codes: list(string)
List of residues of length `N`. Can be 3 letter codes (such as "GLY") or single letter codes (such as "G")
residue_locations: np.array(float)
Rectangular array with size `N` by `3` of locations of `C_alpha` atoms (one per residue)
minimal_q: float, optional
Minimal scattering vector, default `0.0`, units: Angstrom^(-1)
maximal_q: float, optional
Maximal scattering vector, default `0.5`, units: Angstrom^(-1)
points: int, optional
Number of points int the plot, default `20.`
Returns
-------
(np.array(float),np.array(float))
A tuple of numpy arrays containing values of `q` and `I(q)` respectively.
"""
distance_matrix = np.sqrt(
np.sum(
(residue_locations[np.newaxis, :, :] - residue_locations[:, np.newaxis, :])
** 2,
axis=-1,
)
)
q_values = np.linspace(minimal_q, maximal_q, points)
I_values = np.zeros_like(q_values)
for i, q in enumerate(q_values):
form_factors = np.array(
[
saxs_single_bead.form_factors.form_factor(code, q)
for code in residue_codes
]
)
I_values[i] = np.sum(
form_factors[:, np.newaxis]
* form_factors[np.newaxis, :]
* np.sinc(distance_matrix * q / np.pi),
axis=(0, 1),
)
return (q_values, I_values)
def scattering_curve_ensemble(
residue_codes, residue_locations, minimal_q=0.0, maximal_q=0.5, points=20
):
"""
Computes average scattering curve from `residue_codes` and `residue_locations` `M` by `N` by `3` array.
Parameters
----------
residue_codes: list(string)
List of residues of length `N`. Can be 3 letter codes (such as "GLY") or single letter codes (such as "G")
residue_locations: np.array(float)
Rank 3 array with size `M` by `N` by `3` of locations of `C_alpha` atoms (one per residue)
minimal_q: float, optional
Minimal scattering vector, default `0.0`, units: Angstrom^(-1)
maximal_q: float, optional
Maximal scattering vector, default `0.5`, units: Angstrom^(-1)
points: int, optional
Number of points int the plot, default `20.`
Returns
-------
(np.array(float),np.array(float))
A tuple of numpy arrays containing values of `q` and `I(q)` respectively.
"""
distance_matrices = np.sqrt(
np.sum(
(
residue_locations[:, np.newaxis, :, :]
- residue_locations[:, :, np.newaxis, :]
)
** 2,
axis=-1,
)
)
q_values = np.linspace(minimal_q, maximal_q, points)
I_values = np.zeros_like(q_values)
for i, q in enumerate(q_values):
form_factors = np.array(
[
saxs_single_bead.form_factors.form_factor(code, q)
for code in residue_codes
]
)
I_values[i] = np.sum(
form_factors[:, np.newaxis]
* form_factors[np.newaxis, :]
* np.mean(np.sinc(distance_matrices * q / np.pi), axis=0),
axis=(0, 1),
)
return (q_values, I_values)
|
/saxs_single_bead-0.0.3.tar.gz/saxs_single_bead-0.0.3/saxs_single_bead/scattering_curve.py
| 0.915837 | 0.70791 |
scattering_curve.py
|
pypi
|
import numpy as np
def _get_spherical():
"""
Return vector distributed uniformly on a sphere
"""
x = 2.0 * np.random.uniform() - 1.0
phi = 2.0 * np.random.uniform() - 1.0
scale = np.sqrt(1.0 - x * x)
y = scale * np.sin(np.pi * phi)
z = scale * np.cos(np.pi * phi)
return np.array([x, y, z])
def _normalize(vector):
"""
Return normalized vector
"""
return vector / np.sqrt(np.sum(vector ** 2, axis=-1))
def replace_bead(
conglomerate, conglomerate_attachment_point, locations, sizes, bead_id
):
"""
Replace single bead in chain defined by locations by a conglomerte
Parameters
----------
conglomarate : np.array
`N` by `3` array of locations of beads within the conglomerate
attachment_point : np.array or tuple(np.array)
vector of length `3` describing attachment point of the conglomerate to the chain or tuple of two such attachment points
locations : np.array
`M` by `3` array of locations of beads in chain
sizes : np.array
vector of length `M` of sizes of beads in chain
bead_id : int
index of bead to be replaced
Returns
-------
np.array
locations of chain with bead replaced with conglomerate
"""
assert isinstance(bead_id, int)
locations_rest = np.delete(locations, bead_id, axis=0)
if bead_id == 0:
chain_attachment_point = (locations[0] * sizes[1] + locations[1] * sizes[0]) / (
sizes[0] + sizes[1]
)
chain_attachment_vector = locations[1] - locations[0]
chain_attachment_vector = _normalize(chain_attachment_vector)
elif bead_id == -1 or bead_id == (len(sizes) - 1):
chain_attachment_point = (locations[-1] * sizes[-2] + locations[-2] * sizes[-1]) / (
sizes[-1] + sizes[-2]
)
chain_attachment_vector = locations[-1] - locations[-2]
chain_attachment_vector = _normalize(chain_attachment_vector)
else:
raise NotImplementedError
# Conglomerate direction vector
conglomerate_centre = np.mean(conglomerate, axis=0)
conglomerate_extent = np.sqrt(
np.sum((conglomerate_centre - conglomerate_attachment_point) ** 2, axis=-1)
)
conglomerate_direction_vector = _normalize(
conglomerate_centre - conglomerate_attachment_point
)
# Align centre of mass
centre_of_mass = np.mean(conglomerate, axis=0)
conglomerate_centered = conglomerate - centre_of_mass
# Align conglomerate direction vector with x-axis
# TODO replace _get_spherical() with something faster
tmp_a = _normalize(np.cross(_get_spherical(), conglomerate_direction_vector))
rotation_a = np.transpose(
np.array(
[
conglomerate_direction_vector,
tmp_a,
np.cross(conglomerate_direction_vector, tmp_a),
]
)
)
conglomerate_axis_aligned = conglomerate_centered @ rotation_a
# Align x direction vector with chain direction vector
tmp_b = _normalize(np.cross(_get_spherical(), chain_attachment_vector))
rotation_b = np.transpose(
np.array(
[
chain_attachment_vector,
tmp_b,
np.cross(chain_attachment_vector, tmp_b),
]
)
)
conglomerate_direction_aligned = conglomerate_axis_aligned @ np.transpose(
rotation_b
)
# Shift conglomerate centre to right location
conglomerate_shifted = (
conglomerate_direction_aligned
+ chain_attachment_point
+ chain_attachment_vector * conglomerate_extent
)
return np.vstack((locations_rest, conglomerate_shifted))
if __name__ == "__main__":
import sarw_spheres
import matplotlib.pyplot as plt
sizes = np.array([30.0] + [1.0] * 60)
beads = sarw_spheres.generateChain(sizes)
beads = beads + np.array([5.0, 7.0, 10.0])
domain = _normalize(np.array([1.0, 2.0, 3.0])) * np.linspace(
0.0, 60.0, num=30
).reshape(-1, 1)
beads_with_domain = replace_bead(domain, domain[-1], beads, sizes, bead_id=0)
ax = plt.axes(projection="3d")
(x, y, z) = (
beads_with_domain[:, 0],
beads_with_domain[:, 1],
beads_with_domain[:, 2],
)
ax.scatter(x, y, z, c=z, cmap="viridis", linewidth=1.0)
plt.show()
|
/saxs_single_bead-0.0.3.tar.gz/saxs_single_bead-0.0.3/saxs_single_bead/replace_bead.py
| 0.878555 | 0.773281 |
replace_bead.py
|
pypi
|
Change Log
==========
**1.6.5** (January 17, 2018)
Added new ``f`` aliast to ``fmt`` as compatibility shim / polyfill
for users moving toward Python 3.6+ f-strings, but who have to
support prior versions.
**1.6.4** (May 27, 2017)
Now uses the latest version of ``ansicolors``, extending to the
full set of CSS color names and hex notations, in addition to the
traditional small set of ANSI color names. So ``say('this',
style='peachpuff')`` or ``say('this', style='#663399')`` to your
heart's content!
A future release will be needed to extend color name parsing to
other notations such as ANSI numeric and CSS ``rgb()`` spcs.
Also fixed a bug when wrapping, ANSI colors, and colored prefixes
are used together.
**1.6.3** (May 26, 2017)
Adds a ``say.verbatim`` method. It provides all the standard say
formatting features, but does NOT interpolate variable expressions
in braces. Useful for managing pre-formatted text which might
contain expressions without the need for escaping.
Updated Python 2/3 compatibility strategy to be Python 3-centric.
Retired _PY3 flag for _PY2 flag, as Python 3 is now the default
assumption. That we now exclude 2.x with x < 6 and 3.x with x < 3
helps greatly. 2.6 and 2.7 make great reaches forward toward 3.x,
and 3.3 started to make strong reaches backwards.
**1.6.1** (May 23, 2017)
Replaces ``textwrap`` module with ``ansiwrap``. ANSI-colored or
styled text can now be correctly wrapped, prefixed, etc. ``say``
version is bumped only slightly, but this marks a substantial
advance in ability to manage colored/styled text in a "just works"
way, which is the original premise of the package.
**1.6.0** (May 19, 2017)
Withdrew support for backflip-level attempts to make Python 2
files behave with rational encodings. If ``say`` opens a file on
your behalf, it will do the rigtht thing. It will also try very
hard to do the right thing with respect to ``sys.stdout``. But for
arbitrary files that you open, make sure they're properly encoded.
Use ``codecs.open`` or ``io.open`` for that.
Reorganized some code. Added and reinstated tests. Bumped coverage
+1%, to 97%.
Added ``file`` parameter to ``say()``, to make 1:1 compatible with
Python 3's native ``print()``.
**1.6.1** (May 15, 2017)
Updated mechanism for method-specific option setting. Still work
in progress, but code now much cleaner.
The experimental operator form of ``say`` ``>`` has been
withdrawn. The operator style isn't consonant with Python
philosophy, complicated the code base, and only partially worked.
Interesting idea, but experience suggests not worth the trouble.
**1.5.0** (May 14, 2017)
Changed name of parameter ``sep`` in ``hr``, ``title``, and
``sep`` methods because discovered it was conflating and
interfering with the ``sep`` parameter in the main options. The
horizontal separator character that is repeated N times is now
addressed as ``char``.
**1.4.5** (March 22, 2017)
Added ``first_rest`` prefix helper. First line gets one prefix,
(all) subsequent lines get another. Prefix helpers reorganized
into their own submodule, ``show.prefixes``.
**1.4.4** (March 22, 2017)
Fixed problem with Unicode stream handling under Python 2. It has
slipped under the testing radar, given too many mocks and not
enough full-out integration testing. Oops!
**1.4.3** (January 23, 2017)
Updates testing for early 2017 Python versions. Successfully
packaged for, and tested against, all late-model versions of
Python: 2.6, 2.7, 3.3, 3.4, 3.5, and 3.6, as well as PyPy 5.6.0
(based on 2.7.12) and PyPy3 5.5.0 (based on 3.3.5). Python 3.2
removed from official support; no longer a current version of
Python and not well-supported by testing matrix.
**1.4.2** (September 15, 2015)
Tested with Python 3.5.0 final.
**1.4.0** (September 8, 2015)
Added ability to set styles for some methods such as ``title``,
``hr``, and ``sep`` as an overlay to class, object, and per-call
settings. This is a first delivery on what will become a general
feature over the next few releases. Added vertical spacing to
``title`` and ``sep`` methods for nicer layouts.
Increased testing line coverage to 96%, improving several
routines' robustness in the process.
**1.3.12** (September 1, 2015)
Tweaks and testing for new version 1.4 of underlying ``options``
module.
New ``options`` version returns support for Python 2.6.
**1.3.9** (August 26, 2015)
Reorganized documentation structure. Updated some setup
dependencies.
**1.3.5** (August 17, 2015)
Instituted integrated, multi-version coverage testing with tox,
pytest, pytest-cov, and coverage. Initial score: 86%.
**1.3.4** (August 16, 2015)
Updated ``SayReturn`` logic, which was broken, in order to support
an upgrade of ``show``
**1.3.3** (August 16, 2015)
Added ``sep`` method for separators.
Some code cleanups and a few additional tests.å
Officially switched to YAML-format Change Log (``CHANGES.yml``)
**1.3.2** (August 12, 2015)
Code cleanups.
**1.3.1** (August 11, 2015)
Doc, config, and testing updates. Removed ``joiner`` module and
tests. May import that funcationality from ``quoter`` module in
future.
Python 2.6 currently unsupported due to issues with underlying
``stuf`` module. Support may return, depending on compatibility
upgrades for future ``stuf`` releases.
**1.3** (July 22, 2015)
Added ``Template`` class. A deferred-rendering version of ``Text``
**1.2.6** (July 22, 2015)
Configuration, testing matrix, and doc tweaks.
**1.2.5** (December 29, 2014)
Fixed problem that was occuring with use of Unicode characters
when rendered inside the Komodo IDE, which set the ``sys.stdout``
encoding to ``US-ASCII`` not ``UTF-8``. In those cases, now
inserts a codec-based writer object to do the encoding.
**1.2.4** (June 4, 2014)
Now testing for Python 3.3 and 3.4. One slight problem with them
when encoding to base64 or similar bytes-oriented output that did
not appear in earlier Python 3 builds. Examining.
Added gittip link as an experiment.
**1.2.1** (October 16, 2013)
Fixed bug with quoting of style names/definitions.
Tweaked documentation of style definitions.
**1.2.0** (September 30, 2013)
Added style definitions and convenient access to ANSI colors.
**1.1.0** (September 24, 2013)
Line numbering now an optional way to format output.
Line wrapping is now much more precise. The ``wrap`` parameter now
specifies the line length desired, including however many
characters are consumed by prefix, suffix, and indentation.
Vertical spacing is regularized and much better tested. The
``vsep`` option, previously available only on a few methods, is
now available everywhere. ``vsep=N`` gives N blank lines before
and after the given output statement. ``vsep=(M,N)`` gives M blank
lines before, and N blank lines after. A new ``Vertical`` class
describes vertical spacing behind the scenes.
``Say`` no longer attempts to handle file encoding itself, but
passes this responsibility off to file objects, such as those
returned by ``io.open``. This is cleaner, though it does remove
the whimsical possibility of automagical base64 and rot13
encodings. The ``encoding`` option is withdrawn as a result.
You can now set the files you'd like to output to in the same way
you'd set any other option (e.g. ``say.set(files=[...])`` or
``say.clone(files=[...])``). "Magic" parameter handling is enabled
so that if any of the items listed are strings, then a file of
that name is opened for writing. Beware, however, that if you
manage the files option explicitly (e.g.
``say.options.files.append(...)``), you had better provide proper
open files. No magical interpretation is done then. The
previously-necessary ``say.setfiles()`` API remains, but is now
deprecated.
``fmt()`` is now handled by ``Fmt``, a proper subclass of ``Say``,
rather than just through instance settings.
``say()`` no longer returns the value it outputs. ``retvalue`` and
``encoded`` options have therefore been withdrawn.
**1.0.4** (September 16, 2013)
Had to back out part of the common ``__version__`` grabbing. Not
compatible with Sphinx / readthedocs build process.
**1.0.3** (September 16, 2013)
Added ``FmtException`` class
Tightened imports for namespace cleanliness.
Doc tweaks.
Added ``__version__`` metadata common to module, ``setup.py``, and
docs.
**1.0.2** (September 14, 2013)
Added ``prefix`` and ``suffix`` options to ``say`` and ``fmt``,
along with docs and tests.
**1.0.1** (September 13, 2013)
Moved main documentation to Sphinx format in ``./docs``, and
hosted the long-form documentation on readthedocs.org.
``README.rst`` now an abridged version/teaser for the module.
**1.0** (September 17, 2013)
Cleaned up source for better PEP8 conformance
Bumped version number to 1.0 as part of move to `semantic
versioning <http://semver.org>`_, or at least enough of it so as
to not screw up Python installation procedures (which don't seem
to understand 0.401 is a lesser version that 0.5, because 401 >
5).
|
/say-1.6.5.zip/say-1.6.5/docs/CHANGES.rst
| 0.843283 | 0.679285 |
CHANGES.rst
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sayan_distributions-1.0.tar.gz/sayan_distributions-1.0/sayan_distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
# Database objects
The most common task found in SAYN projects is the `autosql` task. This is a type of tasks where you write a `SELECT` statement
and SAYN handles the database object creation. When you run the project you would want a task selecting from a table to run
after the task that creates said table. In order to simplify task dependencies, SAYN considers database objects core concepts
and provides some tools to treat them as such.
## Object specification
In a relational database we typically find database tables organised into schemas, so for example `logs.tournaments` refers to
a table (or view) called `tournaments` in the `logs` schema, whereas `arenas` refers to a table (or view) called `arenas` in the
default schema. SAYN uses the same format to refer to database tables and views, but as we'll see this allows for a more dynamic use.
## Compilation of object names
In a real world scenario we want to write our code in a way that dynamically changes depending on the profile we're running
on (eg: test vs production). This allows for multiple people to collaborate on the same project, wihtout someone's actions
affecting the work of others in the team. Let's consider this example task:
!!! example "tasks/core.yaml"
```
tasks:
example_task:
type: autosql
materialisation: table
file_name: example_task.sql
destination:
schema: models
table: example_model
```
This task uses the `SELECT` statement in the file `sql/example_task.sql` and creates a table `example_model` in the `models` schema
of the default database. Now, if someone in your team runs this task, `models.example_model` will be replace with their new code
and if someone else in the team is executing a task that reads from it it can produce undesired results.
A way to solve this problem could be to have different databases for each person in a team but that can easily lead to complicated
database setups, potential data governance issues and increased database costs, as you might need a copy of the data per person
working with it.
In SAYN there's another solution: we express database object names like `schema.table` but the code that's execution in the database
is transformed according to personal settings. For example, we could have a schema called `analytics_models` where our production lives
and another called `test_models` where we store data produced during development, with table names like `USER_PREFIX_table` rather
than `table` so there's no collision and we minimise data redundancy.
!!! warning
Name configuration only affects the `default_db`. When a SAYN project has more than 1 database
connection you can still use the macros described in this page to set dependencies, but the
resulting value of the macro is exactly the same as the input.
## Name configuration
The modifications described above are setup with prefixes, suffixes and overrides. For example:
!!! example "settings.yaml"
```
profiles:
test:
schema_prefix: test
table_prefix: up
```
The above will make every `schema.table` specification to be compiled to `test_schema.up_table`.
Following the example in the previous section, if we want the to call the production schema `analytics_models` we can do so by
adding the prefix in the `project.yaml` file:
!!! example "project.yaml"
```
schema_prefix: analytics
```
Having both files above configured like that will make it so that referencing `models.example_model` on production will be translated
to `analytics_models.example_model` whereas the same code during testing will be translated as `test_models.up_example_model`. In other
words, what we define in `project.yaml` is the default behaviour which can be overriden in `settings.yaml`.
Aside from `schema_prefix` and `table_prefix` we also have `suffix` (`schema_suffix` and `table_suffix`) which as expected would instead
of prepending the value and an underscore, it adds that at the end.
!!! info
Although the name of these settigns is `table_*` this also applies to views in the database. Similarly
in some databases the concept of `schema` is called differently (eg: dataset in BigQuery) but `schema`
is still used for all databases in SAYN.
## Referencing database objects
So far we've seen how the properties of an autosql task use this database object specification, but the real power of this feature is when
used in the code of the task itself, which we do this with the `src` and `out` macros. For example:
!!! example "settings.yaml"
```
profiles:
test:
schema_prefix: test
table_prefix: up
```
!!! example "sql/example_model.sql"
```
SELECT *
FROM {{ src('logs.raw_table') }}
```
Here we're calling the `src` macro that does 2 things:
* Using prefixes and suffixes translates `logs.raw_table` to the appropriate table name
* Declares that `example_task` (as defined earlier in this page) depends on the task(s) that produce `logs.raw_table`
So the output code to be executed in the database will be:
!!! example "compile/core/example_model.sql"
```
-- SAYN adds the table management code
SELECT *
FROM test_logs.up_raw_table
```
The counterpart to `src` is `out` which similarly translates the value to the appropriate database name, as well as it defines database
objects produced by the task. In `autosql` tasks `out` is not present since there's no usage for it, however this is useful for `sql` tasks:
!!! example "sql/example_sql.sql"
```
CREATE OR REPLACE TABLE {{ out('models.sql_example') }} AS
SELECT *
FROM {{ src('logs.raw_table') }}
```
This code tells SAYN that this sql task produces the table `models.sql_example` and depends on the table `logs.raw_table`, while
simultaneously producing this example code to be executed in the database:
!!! example "compile/core/example_sql.sql"
```
CREATE OR REPLACE TABLE test_models.up_sql_example AS
SELECT *
FROM test_logs.up_raw_table
```
`src` and `out` are also available to python tasks, however we use them with `context.src` or `self.src`:
!!! example "python/example.py"
```
@task(sources='logs.raw_table')
def example_python(context, warehouse):
table_name = context.src('logs.raw_table')
data = warehouse.read_data(f"select * from {table_name}")
...
```
!!! example "python/advanced_example.py"
```
class MyTask(PythonTask):
def config(self):
self.table_name = self.src('logs.raw_table')
def run(self):
data = self.default_db.read_data(f"select * from {self.table_name}")
...
```
The above examples are equivalent to each other and we use `context.src` in the decorator form and `self.src` in the more advanced class
model. `context.out` and `self.out` are also available in python tasks and their behaviour is the same as with sql and autosql tasks.
!!! info
`src` should only be used for tables that are managed by the SAYN project. If an external EL tool is being used to load data
into your warehouse, references to these tables should be hardcoded instead, as their names never change depending on your SAYN
profile, nor there are any task dependencies to infer from using `src`.
Note that calling `src` and `out` in the `run` method of a python task class or in the function code when using a decorator doesn't
affect task dependencies, it simply outputs the translated database object name. The task dependency behaviour in python tasks is done
by either calling `self.src` or `self.out` in the `config` method of the class or by passing these references to the `task` decorator
in the `sources` and `outputs` arguments as seen in this example. For more details head to [the python task section](tasks/python).
## Altering the behaviour of `src`
A very common situation when working in your data pipeline is when we have a lot of data to work with but at any point in time while modelling
we find ourselves working only a subset of it. Working with sample data can be inconvenient during development because it hinders our
ability to evaluate the result and the alternative, having a duplicate of the data for every person in the team, can be costly both in
terms of money and time producing and maintaining these duplicates. For this reason SAYN comes equiped with 2 features that simplifies this
switchin: `from_prod` and upstream prod.
`from_prod` is most useful when a team member never deals with a part of the SAYN project. For example, a data analyst that only deals with
modelling tasks in a SAYN project that also has extraction tasks. Upstream prod is most useful when we're doing changes to a small set of task,
so we don't want to have to repopulate all the upstream tables.
### `from_prod` configuration
The first mechanism is `from_prod` which we set in the `settings.yaml` file and override the behaviour of `src`. An example:
!!! example "project.yaml"
```
schema_prefix: analytics
```
!!! example "sql/core/test_table.sql"
```
SELECT *
FROM {{ src('logs.extract_table') }}
```
!!! example "settings.yaml"
```
profiles:
dev:
table_prefix: up
schema_prefix: test
from_prod:
- "logs.*"
```
In the above example we have a task selecting data from `logs.extract_table` which for the purpose of this example we can assume is
created by an extraction task pulling data from an API. On production, `src('logs.extract_table')` will be translated as
`analytics_logs.extract_table`, whereas during development it will be translated as `test_logs.up_extract_table`, given the
configuration in the `dev` profile in `settings.yaml`. However there's also a `from_prod` entry with `logs.*` which is telling
SAYN that all tables or views from the `logs` schema should come from production, so the final code for the `test_table` task will
actually be:
!!! example "compile/core/test_table_select.sql"
```
SELECT *
FROM analytics_logs.extract_table
```
As you can see, we just need to specify a list of tables in `from_prod` to always read from the production configuration, that is, the
settings shared by all team members as specified in `project.yaml`. To make it easier to use, wildcards (`*`) are accepted, so that we
can specify a whole schema like in the example, but we can also specify a list of tables explicitely instead.
`from_prod` can also be specified using environment variables with `export SAYN_FROM_PROD="logs.*"` where the value is a comma
separated list of tables.
!!! warning
To avoid accidentally affecting production tables, `from_prod` only affects `src`. The result of calling `out` always evaluate
to your configuration in `settings.yaml` or environment variables.
### Upstream prod
The second mechanism to override the behaviour of `src` is upstream prod. We use upstream prod by specifying the flag (`-u` or `--upstream-prod`)
when running SAYN while filtering, for example `sayn run -t example_task -u`. When we do this, any reference to tables produced by tasks not
present in the current execution will use the parameters defined in `project.yaml`.
For example:
!!! example "project.yaml"
```
schema_prefix: analytics
```
!!! example "settings.yaml"
```
profiles:
test:
schema_prefix: test
table_prefix: up
```
!!! example "sql/example_model.sql"
```
SELECT *
FROM {{ src('logs.raw_table') }}
```
Running `sayn run -t example_task` will run the following code in the database:
!!! example "compile/core/example_task_create_table.sql"
```
CREATE OR REPLACE TABLE test_models.up_example_model AS
SELECT *
FROM test_logs.up_raw_table
```
So the `src` macro translates `logs.raw_table` to the testing name `test_logs.up_raw_table`. However, with upstream prod
(`sayn run -t example_task -u`) the code executed will be:
!!! example "compile/core/example_task_create_table.sql"
```
CREATE OR REPLACE TABLE test_models.up_example_model AS
SELECT *
FROM analytics_logs.raw_table
```
Since no task in this execution creates `logs.raw_table` in SAYN translates that instead to the production name `analytics_logs.raw_table`,
while the table created is still the test version.
Let' assume now that we have another task that we want to include in the execution:
!!! example "sql/another_example_model.sql"
```
SELECT *
FROM {{ src('models.example_model') }}
```
So when run `sayn run -t example_task another_example_task -u` the code for the `example_task` will remain the same as above,
but the code executed for `another_example_model` will be:
!!! example "compile/core/another_example_task_create_table.sql"
```
CREATE OR REPLACE TABLE test_models.up_another_example_model AS
SELECT *
FROM test_models.up_example_model
```
Because `example_task` is part of this exeuction and produces the table `models.example_model` reference by `another_example_task`
`models.example_model` is translated using the testing settings into `test_models.up_example_model` unlike `logs.raw_table` which
as no task producing it is present in this execution, will be translated into the production name.
With upstream prod it becomes a lot easier to work with your modelling layer without having to duplicate all your upstream tables
for every person in the team or being forced to work with sampled data.
## Advanced usage
For a more advanced usage, we also have `schema_override` and `table_override` which allows us to completely change the behaviour.
With `override` what we do is define the exact value that a schema or table name will have based on some Jinja template logic. To
this template 3 values are passed:
* `table`: the name of the table specified in sayn code
* `schema`: the name of the schema specified in sayn code
* `connection`: the name of the connection it refers to
!!! example "settings.yaml"
```
profiles:
test:
schema_override: "{% if schema != 'logs' %}test{% else %}analytics{% endif %}_{{ schema }}"
table_override: "{% if schema != 'logs' %}up_{{ table }}{% else %}{{ table }}{% endif %}"
```
With this example, a reference to `models.example_model` will be translated as `test_models.up_example_model` but a reference to
`logs.raw_logs` will be translated as `analytics_logs.raw_logs`. This can be useful in cases where someone in the team never
works with data ingestion, so every modelling task ran by them will always reads from production data, rather than having to
duplicate the data or having to work with a sample of this raw data.
!!! warning
Note that with the above example of override, a task writting to the logs schema will always write to the production version
`analytics_logs` so to avoid issue you should always have good permissions setup in your database.
|
/sayn-0.6.11-py3-none-any.whl/docs/database_objects.md
| 0.901161 | 0.986662 |
database_objects.md
|
pypi
|
# `python` Task
## About
The `python` task allows you to run python scripts. Therefore, those tasks can do anything Python can
do. They are extremely useful for data extraction or data science models.
There are two models for specifying python tasks in SAYN: a simple way through using decorators and a more advanced way which is class based.
## Simple Definition of `python` Tasks
You can define `python` tasks in SAYN very simply by using decorators. This will let you write a Python function and turn that function into a task. First, you need to add a group in `project.yaml` pointing to the `.py` file where the task code lives:
!!! example "project.yaml"
```
groups:
decorator_tasks:
type: python
module: decorator_tasks
parameters:
param1: some_value
```
Now all tasks defined in `python/decorator_tasks.py` will be added to the DAG. The `module` property expects a python path from the `python` folder in a similar way as you would import a module in python. For example, if our task definition exists in `python/example_mod/decorator_tasks.py` the value in `module` would be `example_mod.decorator_tasks`.
!!! example "python/decorator_tasks.py"
```
from sayn import task
@task(outputs='logs.api_table', sources='logs.another_table')
def example_task(context, warehouse, param1):
src_table = context.src('logs.another_table')
out_table = context.out('logs.api_table')
warehouse.execute(f'CREATE OR REPLACE TABLE {out_table} AS SELECT * from {src_table}')
```
The above example showcases the key elements to a python task:
* `task`: we import SAYN's `task` decorator which is used to turn functions into SAYN tasks added to the DAG.
* parameters to `task`: we can pass parameters `sources`, `outputs` and `parents` which are either lists of table names or a single table name. This allows SAYN define the task dependencies. We can also pass a value for `on_fail` and `tags`.
* function name: the name of the function (`example_task` here) will be the name of the task. We can use this name with `-t` to execute this task only for example.
* function parameters: arguments to the function have special meaning and so the names need to be respected:
* `context`: is an object granting access to some functionality like project parameters, connections and other functions as seen further down.
* `warehouse`: connection names (`required_credentials` in `project.yaml`) will automatically provide the object of that connection. You can specify any number of connections here.
* param1: the rest of the function arguments are matched against task parameters, these are values defined in the `parameter` property in the group.
!!! info "Python decorators"
Decorators in python are used to modify the behaviour of a function. It can be a bit daunting to understand when we first encounter them but for the purpose of SAYN all you need to know is that `@task` turns a standard python
function into a SAYN task which can assess useful properties via arguments. There are many resources online describing how decorators work,
[for example this](https://realpython.com/primer-on-python-decorators/).
Given the code above, this task will:
* Depend on (execute after) the tasks that produce `logs.another_table` since we added the `sources` argument to the decorator.
* Be the parent of (execute before) any task reading from `logs.api_table` since we added the `outputs` argument to the decorator.
* Get the compiled value of `logs.another_table` and `logs.api_table` and keep in 2 variables. For details on database objects compilation
make sure you check [the database objects page](../database_objects.md).
* Execute a create table statement using the tables above on the database called `warehouse` in the project.
## Advanced `python` Task Definition With Classes
The second model for defining python tasks is through classes. When using this model we get an opportunity to:
* do validation before the task is executed by overloading the `setup` method, which is useful as a way to alert early during the execution that something is incorrectly defined rather than waiting for the task to fail.
* define more complex dependencies than `sources` and `outputs` by overloading the `config` method.
* implement code for the `compile` stage allowing for more early stage indication of problems.
A `python` task using classes is defined as follows:
!!! example "tasks/base.yaml"
```yaml
task_python:
type: python
class: file_name.ClassName
```
Where `class` is a python path to the Python class implementing the task. This code should be stored in the `python` folder of your project, which in itself is a python module that's dynamically loaded, so it needs an empty `__init__.py` file in the folder. The class then needs to be defined as follows:
!!! example "python/file_name.py"
``` python
from sayn import PythonTask
class ClassName(PythonTask):
def config(self):
self.src('logs.source_table')
self.out('logs.output_table')
def setup(self):
# Do some validation of the parameters
return self.success()
def run(self):
# Do something useful
return self.success()
```
In this example:
* We create a new class inheriting from SAYN's PythonTask.
* We set some dependencies by calling `self.src` and `self.out`.
* We define a setup method to do some sanity checks. This method can be skipped, but it's
useful to check the validity of project parameters or so some initial setup.
* We define the actual process to execute during `sayn run` with the `run` method.
* Both `setup` and `run` return the task status as successful `return self.success()`, however we can indicate a task failure to sayn with `return self.fail()`. Failing a python task forces child tasks to be skipped.
???+ attention
Python tasks can return `self.success()` or `self.fail()` to indicate the result of the execution, but it's not mandatory. If the code throws a python exception, the task will be considered as failed.
## Using the SAYN API
When defining our `python` task, you would want to access parts of the SAYN infrastructure like
parameters and connections. When using the decorator model, to access this functionality we need to include the `context` argument in the function, when using the class model the more standard `self` is used, and both give access to the same functionality. The list of available properties through `self` and `context` is:
* `parameters`: accesses project and task parameters. For more details on `parameters`,
see the [Parameters](../parameters.md) section.
* `run_arguments`: provides access to the arguments passed to the `sayn run` command like the incremental values (`full_load`, `start_dt` and `end_dt`).
* `connections`: dictionary containing the databases and other custom API credentials. API connections appear as simple python dictionaries, while databases are SAYN's [Database](../api/database.md) objects.
* `default_db`: provides access to the `default_db` database object specified in the `project.yaml` file.
* `src`: the `src` macro that translates database object names as described in [database objects](../database_objects.md). Bear in mind that using this function also adds dependencies to the task, but only when called from the `config` method of a python task defined with the class model.
* `out`: the `out` macro that translates database object names as described in [database objects](../database_objects.md). Bear in mind that using this function also creates dependencies between tasks, but only when called from the `config` method of a python task defined with the class model.
!!! tip
You can use `self.default_db` to easily perform some operations on the default database such as reading or
loading data or if using decorators simply include an argument with the name of the connection. See the
methods available on the [Database](../api/database.md) API.
!!! tip
We all love `pandas`! If you want to load a pandas dataframe you can use one of these options:
* with the `pandas.DataFrame.to_sql` method: `df.to_sql(self.default_db.engine, 'table')`.
* with the `self.default_db.load_data` method: `self.default_db.load_data('table', df.to_dict('records'))`.
## Logging For `python` Tasks With The SAYN API
The unit of process within a task in SAYN is the `step`. Using steps is useful to indicate current
progress of execution but also for debugging purposes. The [tutorial](../tutorials/tutorial_part3.md) is a good example of usage, as we define the `load_data` task as having 5 steps:
!!! example "python/load_data.py"
```python
context.set_run_steps(
[
"Generate Data",
"Load fighters",
"Load arenas",
"Load tournaments",
"Load battles",
]
)
```
This code defines which steps form the task. Then we can define the start and end
of that step with:
!!! example "python/load_data.py"
```python
with context.step('Generate Data'):
data_to_load = get_data(tournament_battles)
```
Which will output the following on the screen:
!!! example "CLI output"
```bash
[1/7] load_data (started at 15:25): Step [1/5] Generate Data
```
The default cli presentation will show only the current step being executed, which in the
case of the tutorial project goes very quickly. However we can persist these messages using
the debug flag to the cli `sayn run -d` giving you this:
!!! example "CLI ouput"
```bash
[1/7] load_data (started at 15:29)
Run Steps: Generate Data, Load fighters, Load arenas, Load tournaments, Load battles
ℹ [1/5] [15:29] Executing Generate Data
✔ [1/5] [15:29] Generate Data (19.5ms)
ℹ [2/5] [15:29] Executing Load fighters
✔ [2/5] [15:29] Load fighters (16.9ms)
ℹ [3/5] [15:29] Executing Load arenas
✔ [3/5] [15:29] Load arenas (12.3ms)
ℹ [4/5] [15:29] Executing Load tournaments
✔ [4/5] [15:29] Load tournaments (10.9ms)
ℹ [5/5] [15:29] Executing Load battles
✔ [5/5] [15:29] Load battles (210.3ms)
✔ Took (273ms)
```
So you can see the time it takes to perform each step.
Sometimes it's useful to output some extra text beyond steps. In those cases, the API provides
some methods for a more adhoc logging model:
* `self.debug(text)`: debug log to console and file. Not printed unless `-d` is used.
* `self.info(text)`: info log to console and file. Not persisted to the screen if `-d` is not specified.
* `self.warning(text)`: warning log to console and file. Remains on the screen after the task finishes (look for yellow lines).
* `self.error(text)`: error log to console and file. Remains on the screen after the task finishes (look for red lines).
!!! note
`self.error` doesn't abort the execution of the task, nor it sets the final status to being failed.
To indicate a python task has failed, use this construct: `return self.fail(text)` where text
is an optional message string that will be showed on the screen.
For more details on the SAYN API, check the [API reference page](../api/python_task.md).
|
/sayn-0.6.11-py3-none-any.whl/docs/tasks/python.md
| 0.949728 | 0.971618 |
python.md
|
pypi
|
# `autosql` Task **[SUNSETTED]**
## About
The `autosql` task lets you write a `SELECT` statement and SAYN then automates the data processing (i.e. table or view creation, incremental load, etc.) for you.
## Defining `autosql` Tasks
An `autosql` task group is defined as follows:
!!! example "project.yaml"
```
...
groups:
core:
type: autosql
file_name: "core/*.sql"
materialisation: table
destination:
table: "{{ task.name }}"
...
```
An `autosql` task is defined by the following attributes:
* `type`: `autosql`.
* `file_name`: the path to a file **within the sql folder of the project's root**. When defining `autosql` groups in `project.yaml` this property needs to be a glob expression, for example `group/*.sql`.
* `materialisation`: this should be either `table`, `view` or `incremental`. `table` will create a table, `view` will create a view. `incremental` will create a table and will load the data incrementally based on a delete key (see more detail on `incremental` below).
* `destination`: this sets the details of the data processing.
* `tmp_schema`: the (optional) schema which will be used to store any necessary temporary object created in the process. The final compiled value is affected by `schema_prefix`, `schema_suffix` and `schema_override` as specified in [database objects](../database_objects.md).
* `schema`: the (optional) destination schema where the object will be created. The final compiled value is affected by `schema_prefix`, `schema_suffix` and `schema_override` as specified in [database objects](../database_objects.md).
* `table`: is the name of the object that will be created. The final compiled value is affected by `table_prefix`, `table_suffix` and `table_override` as specified in [database objects](../database_objects.md).
* `db`: the (optional) destination database.
* `delete_key`: specifies the incremental process delete key. This is for `incremental` `materialisation` only.
!!! info
By default the task is executed in the database defined by `default_db` in `project.yaml`. `db` can be specified to change this, in which case the connection specified needs to:
* Be a credential from the `required_credentials` list in `project.yaml`.
* Be defined in your `settings.yaml`.
* Be one of the supported [databases](../databases/overview.md).
## Setting Dependencies With `autosql`
With `autosql` tasks, you should use the `src` macro in your `SELECT` statements to implicitly create task dependencies.
!!! example `autosql` query
```
SELECT field1
, field2
FROM {{ src('my_table') }} l
```
By using the `{{ src('my_table') }}` in your `FROM` clause, you are effectively telling SAYN that your task depends on the `my_table` table (or view). As a result, SAYN will look for the task that produces `my_table` and set it as a parent of this `autosql` task automatically.
!!! tip
When using the `src` macro, you can pass a structure formatted as `schema.table` such as `{{ src('my_schema.my_table') }}`. In this case, SAYN interprets the first element as the schema, the second element as the table or view. If you use `schema_prefix` and / or `table_prefix` in your project settings, SAYN will then prepend the `schema_prefix` to the `schema` value and `table_prefix` to the `table` value. For example, if your `schema_prefix` is set to `analytics` and `table_prefix` to `up` then `{{ src('my_schema.my_table') }}` will compile `analytics_my_schema.up_my_table`.
## Advanced Configuration
If you need to amend the configuration (e.g. materialisation) of a specific `autosql` task within a `group`, you can overload the values specified in the YAML group definition. To do this, we simply call `config` from a Jinja tag within the sql file of the task:
!!! example "autosql with config"
```
{{ config(materialisation='view') }}
SELECT ...
```
The above code will override the value of `materialisation` setting defined in YAML to make this model a view. All other parameters
described above in this page are also available to overload with `config` except `db`, `file_name` and `name`.
## Using `autosql` In `incremental` Mode
`autosql` tasks support loads incrementally, which is extremely useful for large data volumes when full
refresh (`materialisation: table`) would be infeasible.
We set an `autosql` task as incremental by:
1. Setting `materialisation` to `incremental`
2. Defining a `delete_key`
!!! example "autosql in incremental mode"
```yaml
...
task_autosql_incremental:
type: autosql
file_name: task_autosql_incremental.sql
materialisation: incremental
destination:
tmp_schema: analytics_staging
schema: analytics_models
table: task_autosql
delete_key: dt
...
```
When using `incremental`, SAYN will do the following in the background:
1. Create a temporary table based on the incremental logic from the SAYN query.
2. Delete from the final table those records for which the `delete_key` value is in the temporary table.
3. Insert the contents of the temporary table into the final table.
In order to make the `SELECT` statement incremental, SAYN provides the following arguments:
* `full_load`: a flag defaulting to `False` and controlled by the `-f` flag in the SAYN command.
If `-f` is passed to the sayn command, the final table will be replaced with the temporary one
in step 2 above, rather than performing a merge of the data.
* `start_dt`: a date defaulting to "yesterday" and controlled by the `-s` flag in the SAYN command.
* `end_dt`: a date defaulting to "yesterday" and controlled by the `-e` flag in the SAYN command.
!!! example "SQL using incremental arguments"
```sql
SELECT dt
, field2
, COUNT(1) AS c
FROM table
WHERE dt BETWEEN {{ start_dt }} AND {{ end_dt }}
GROUP BY 1,2
```
## Defining columns
Autosql tasks accept a `columns` field in the task definition that affects the table creation by enforcing types and column order.
!!! attention
Each supported database might have specific DDL related to it. Below are the DDLs that SAYN supports across all databases. For DDLs related to specific databases see the database-specific pages.
### CREATE TABLE DDLs
SAYN also lets you control the CREATE TABLE statement if you need more specification. This is done with:
* columns: the list of columns including their definitions.
* table_properties: database specific properties that affect table creation (indexes, cluster, sorting, etc.).
* post_hook: SQL statments executed right after the table/view creation.
`columns` can define the following attributes:
* name: the column name.
* type: the column type.
* tests: list of keywords that constraint a specific column
- unique: enforces a unique constraint on the column.
- not_null: enforces a non null constraint on the column.
- allowed_values: list allowed values for the column.
`table_properties` can define the following attributes (database specific):
* indexes:
* sorting: specify the sorting for the table
* distribution_key: specify the type of distribution.
* partitioning: specify the partitioning model for the table.
* clustering: specify the clustering for the table.
!!! attention
Each supported database might have specific `table_properties` related to it; see the database-specific pages for further details and examples.
!!! Attention
If the a primary key is defined in both the `columns` and `indexes` DDL entries, the primary key will be set as part of the `CREATE TABLE` statement only.
!!! example "autosql with columns"
```yaml
...
task_autosql:
type: autosql
file_name: task_autosql.sql
materialisation: table
destination:
tmp_schema: analytics_staging
schema: analytics_models
table: task_autosql
ddl:
columns:
- name: x
type: int
primary: True
- name: y
type: varchar
unique: True
permissions:
role_name: SELECT
...
```
|
/sayn-0.6.11-py3-none-any.whl/docs/tasks/autosql.md
| 0.927552 | 0.955693 |
autosql.md
|
pypi
|
# Tasks
## About
A SAYN project is split into units of execution called tasks. The order of execution of these tasks is given based on the dependencies between them which you specify when writing your tasks. SAYN then takes this information and generates a DAG (Direct Acyclic Graph) automatically.
!!! info
A Directed Acyclic Graph is a data structure which enables the conveniently modelling of tasks and dependencies:
* `graph`: a data structure which consists of `nodes` connected by `edges`.
* `directed`: dependencies have a direction. If there is an `edge` (i.e. a dependency) between two tasks, one will run before the other.
* `acyclic`: there are no circular dependencies. If you process the whole graph, you will never encounter the same task twice.
Dependencies between tasks are defined based on the tables or views that tasks need to read. In SAYN, this is automated through the concept of `sources` and `outputs`. For more custom use, SAYN also supports the manual definition of relationship between tasks through `parents`.
For example, the SAYN tutorial defines the following DAG:

Through tasks, SAYN provides a lot of automation under the hood, so make sure you explore the various task types SAYN offers!
## Task Types
Please see below the available SAYN task types:
- [`autosql`](autosql.md): **[SUNSETTED]** simply write a `SELECT` statement and SAYN automates the data processing (i.e. table or view creation, incremental load, etc.) for you.
- [`python`](python.md): enables you to write a Python process. Can be used for a wide range of cases from data extraction to data science models - anything Python lets you do.
- [`copy`](copy.md): enables to automatically copy data from one database to another.
- [`sql`](sql.md): executes any SQL statement. There can be multiple statements within the SQL file. **OR** simply write a `SELECT` statement and SAYN automates the data processing (i.e. table or view creation, incremental load, etc.) for you.
- [`dummy`](dummy.md): those tasks do not do anything. They can be used as connectors between tasks.
## Defining Tasks
Tasks in SAYN are defined into `groups` which we describe in the `project.yaml` file in your project. Task `groups` define a set of tasks which share the same attributes. For example we can define a group formed of `sql` tasks called `core` like this:
!!! example "project.yaml"
```
groups:
core:
type: sql
file_name: "core/*.sql"
materialisation: table
destination:
table: "{{ task.name }}"
```
The properties defined in the group tell SAYN how to generate tasks:
* `type`: this tells SAYN to create tasks of type [sql](sql.md)
* `file_name`: this property tells SAYN what files to use to generate tasks. The files for sql tasks are stored under the `sql` folder, so this expression is telling us to create a task per file with the extension `sql` found in the `sql/core` folder
* `materialisation`: describes what database object to create in the database
* `destination`: defines where to create the database object, in this case we're just using the name, which will simply be the name of the task
!!! attention
You would always want the `file_name` property used in group definitions to be a [glob expression](https://en.wikipedia.org/wiki/Glob_(programming)) so that it points at a list of files. Any other property defined in groups will be interpreted as described in the page for the task type in this documentation.
When SAYN interprets this group, for every file found matching the glob expression in `file_name` a task will be generated and the name of that task will match the name of the file without the extension. For example if the `sql/core` folder in our project contains 2 files called `table1.sql` and `table2.sql` then 2 tasks will be created called `table1` and `table2`. To allow those 2 tasks to create different tables in the database we use Jinja expressions. In this case we just call the result table exactly the name of the task using `"{{ task.name }}"`.
!!! tip
When a SAYN project grows, it is good practice to start separating your tasks in multiple groups (e.g. extracts, core models, marketing models, finance models, data science, etc.) in order to organise processes.
This definition of `groups` in the `project.yaml` file is available for `autosql`, `sql` and `python` tasks. You can read more about this by heading to the corresponding pages.
## Task Attributes
!!! example "project.yaml"
```
groups:
core:
type: sql
file_name: "core/*.sql"
materialisation: table
destination:
table: "{{ task.name }}"
```
As you saw in the example above, task attributes can be defined in a dynamic way. This example shows how to use the task name to dynamically define a task. This will effectively tell the task to create the outputs of the `core` tasks into tables based on the `task` name, which is the name of the file without the `.sql` extension for `sql` tasks.
!!! tip
You can also reference to `{{ task.group }}` dynamically.
## YAML based definition of tasks
The model described above makes creating a SAYN project very easy, but there are situations where a more advanced model is required. For that we can
define tasks in YAML files under the `tasks` folder at the root level of your SAYN project. Each file in the `tasks` folder represents a [task group](#task_groups) and can be executed independently. By default, SAYN includes any file in the `tasks` folder ending with a `.yaml` extension when creating the DAG.
Within each YAML file, tasks are defined in the `tasks` entry.
!!! example "tasks/base.yaml"
```yaml
tasks:
task_1:
# Task properties
task_2:
# Task properties
# ...
```
All tasks share a number of common properties available:
| Property | Description | Required |
| -------- | ----------- | ---- |
| type | The task type. | Required one of: `autosql`, `sql`, `python`, `copy`, `dummy` |
| preset | A preset to inherit task properties from. See [the presets section](../presets.md) for more info. | Optional name of preset |
| parents | A list of tasks this one depends on. All tasks in this list is ensured to run before the child task. | Optional list |
| sources | A list of database tables or views this task uses. | Optional list |
| outputs | A list of database tables or views this task produces | Optional list |
| tags | A list of tags used in `sayn run -t tag:tag_name`. This allows for advanced task filtering when we don't want to run all tasks in the project. | Optional list |
| on_fail | Defines the behaviour when the [task fails](#task_failure_behaviour). | Optional one of: `skip` or `no_skip` |
!!! attention
Different task types have different attributes. Make sure that you check each task type's specific documentation to understand how to define it.
## Task failure behaviour
When a task fails during an execution, all descendent tasks will be skipped as expected. However sometimes it can be useful to
execute descending tasks even if a parent fails, for example when an API can frequently throw errors and we want to continue the execution just with as much data as it was possible to pull from it. In this case we make use of the `on_fail` task property to
specify that we do not want to skip descending tasks.
!!! example "tasks/base.yaml"
```yaml
tasks:
could_fail_task:
type: python
class: could_fail.CouldFailTask
on_fail: no_skip
child_task:
type: sql
file_name: query_using_could_fail_data.sql
parents:
- failing_task
```
In the above case, if `could_fail_task` fails, `child_task` will not be skipped.
|
/sayn-0.6.11-py3-none-any.whl/docs/tasks/overview.md
| 0.896473 | 0.986598 |
overview.md
|
pypi
|
# `copy` Task
## About
The `copy` task copies tables from one database to another. It can be used to automatically
ingest data from operational databases (e.g. PostgreSQL) to your analytics warehouse.
!!! attention
Copy tasks can only be defined in YAML groups in the tasks folder, not directly in `project.yaml`.
## Defining `copy` Tasks
A `copy` task is defined as follows:
!!! example "tasks/base.yaml"
```yaml
task_copy:
type: copy
source:
db: from_db
schema: from_schema
table: from_table
destination:
tmp_schema: staging_schema
schema: schema
table: table_name
```
`copy` tasks have the following parameters that need to be set:
* `type`: `copy`.
* `source`: the source details
* `db`: the source database.
* `schema`: the (optional) source schema.
* `table`: the name of the table top copy.
* `destination`: the destination details.
* `tmp_schema`: the (optional) staging schema used in the process of copying data.
* `schema`: the (optional) destination schema.
* `table`: the name of the table to store data into.
* `db`: the (optional) destination database.
!!! info
By default the destination is the database defined by `default_db` in `project.yaml`. `db` can be specified to change this, in which case the connection specified needs to:
* Be a credential from the `required_credentials` list in `project.yaml`.
* Be defined in your `settings.yaml`.
* Be one of the supported [databases](../databases/overview.md).
The tables specified in `destination` and `source` will be affected by prefixes, suffixes and overrides as
described in [database objects](../database_objects.md), meaning it only affects tables in the `default_db`
(typically the `destination` in extraction tasks and the `source` in reverse ETL tasks).
By default, tables will be copied in full every time SAYN runs replacing the table with the newly
pulled data. This behaviour can be altered with the following:
* `incremental_key`: the column to use to determine what data is new. The process will transfer
any data in the source table with an `incremental_key` value superior or equal to the maximum
found in the destination, or with a `NULL` value.
* `delete_key`: the column which will be used for deleting data in incremental loads. The process
will delete any data in the destination table with a `delete_key` value found in the new dataset
obtained before inserting.
* `append`: a boolean flag indicating if data should be replaced in the destination. This means that
in full load mode (`incremental_key` not specified) records will be appended rather than the table
being recreated every time; and in incremental mode records will not be removed, so `delete_key`
shouldn't be specified. Additionally an extra column `_sayn_load_ts` will be added to the destination
table to help with de-duplication.
!!! example "tasks/base.yaml"
```yaml
task_copy:
type: copy
source:
db: from_db
schema: from_schema
table: from_table
destination:
tmp_schema: staging_schema
schema: schema
table: table_name
incremental_key: updated_at
delete_key: id
```
In this example, we use `updated_at` which is a field updated every time a record changes (or is created)
on a hypothetical backend database to select new records, and then we replace all records in the target
based on the `id`s found in this new dataset.
!!! example "tasks/base.yaml"
```yaml
task_copy:
type: copy
source:
db: from_db
schema: from_schema
table: from_table
destination:
tmp_schema: staging_schema
schema: schema
table: table_name
incremental_key: updated_at
append: True
```
In this other example, whenever the task runs it checks the latest value of `updated_at` and appends to the
destination table every record in the source with an `updated_at` greater than or equal to the maximum value
present in the destination.
While the task is running, SAYN will get records from the source database and load into a temporary table,
and will merge into the destination table once all records have been loaded. The frequency of loading
into this table is determined by the value of `max_batch_rows` as defined in the credentials for the
destination database, which defaults to 50000. However this behaviour can be changed with 2 properties:
* `max_batch_rows`: this allows you to overwrite the value specified in the credential for this task only.
* `max_merge_rows`: this value changes the behaviour so that instead of merging into the destination
table once all rows have been loaded, instead SAYN will merge after this number of records have been
loaded and then it will repeat the whole process. The advantage of using this parameter is that for
copies that take a long time, an error (ie: loosing the connection with the source) wouldn't result
in the process having to be started again from the beginning.
!!! warning
When using `max_merge_rows` SAYN will loop through the merge load and merge process until the number
of records loaded is lower than the value of `max_merge_rows`. In order to avoid infinite loops, the
process will also stop after a maximum of 100 iteration. To avoid issues, it should be set to a very
large value (larger than `max_batch_rows`).
## Data types and columns
`copy` tasks accept a `columns` field in the task definition in the same way that `autosql` does. With this
specification, we can override the default behaviour of copy when it comes to column types by enforcing
specific column types in the final table:
!!! example "tasks/base.yaml"
```yaml
task_copy:
type: copy
source:
db: from_db
schema: from_schema
table: from_table
destination:
tmp_schema: staging_schema
schema: schema
table: table_name
incremental_key: updated_at
delete_key: id
columns:
- id
- name: updated_at
type: timestamp
```
In this example we define 2 columns for `task_copy`: `id` and `updated_at`. This will make SAYN:
1. Copy only those 2 columns, disregarding any other columns present at source
2. Infer the type of `id` based on the type of that column at source
3. Enforce the destination table type for `updated_at` to be `TIMESTAMP`
An additional property `dst_name` in columns is also supported. Specifying this property will
change the name of the column in the destination table. When using this property, `delete_key`
and `incremental_key` need to reference this new name.
!!! example "tasks/base.yaml"
```yaml
task_copy:
type: copy
source:
db: from_db
schema: from_schema
table: from_table
destination:
tmp_schema: staging_schema
schema: schema
table: table_name
incremental_key: updated_ts
delete_key: id
columns:
- id
- name: updated_at
dst_name: updated_ts
```
In this example, the `updated_at` column at source will be called `updated_ts` on the target.
Note the name in `incremental_key` uses the name on the target.
Additionally, in the `ddl` property we can specify indexes and permissions like in `autosql`.
Note that some databases support specific DDL other than these.
|
/sayn-0.6.11-py3-none-any.whl/docs/tasks/copy.md
| 0.904673 | 0.939748 |
copy.md
|
pypi
|
# `sql` Task
## About
The `sql` task provides you with different options for working with SQL queries. It lets you execute a SQL script with one or many statements. This is useful for
executing `UPDATE` statements for example. It also lets you write a `SELECT` statement and SAYN then automates the data processing (i.e. table or view creation, incremental load, etc.) for you.
!!! info
The old `sql` and `autosql` tasks have been combined into one task class, differentiated with the `materialisation` parameter. The old `autosql` task still exists for backwards compatibility.
## Defining `sql` Tasks
An `sql` task group is defined as follows:
!!! example "project.yaml"
```
...
groups:
core:
type: sql
file_name: "core/*.sql"
materialisation: table
destination:
table: "{{ task.name }}"
...
```
!!! simple example "tasks/base.yaml"
```yaml
task_sql:
type: sql
file_name: sql_task.sql
materialisation: script
```
An `sql` task is defined by the following attributes:
* `type`: `sql`.
* `file_name`: the path to a file **within the sql folder of the project's root**. When defining `sql` groups in `project.yaml` this property needs to be a glob expression, for example `group/*.sql`.
* `materialisation`: this should be either `script`, `table`, `view` or `incremental`. `script` will execute the code unmodified (after jinja compilation), `table` will create a table, `view` will create a view. `incremental` will create a table and will load the data incrementally based on a delete key (see more detail on `incremental` below).
* `destination`: is the name of the object that will be created. It is defined like so `schema.table` (similarly to the `src` macro; look bellow). The schema part of the parameter is optional. The final compiled value is affected by `schema_prefix`, `schema_suffix` and `schema_override` as specified in [database objects](../database_objects.md).
* `tmp_schema`: the (optional) schema which will be used to store any necessary temporary object created in the process. The final compiled value is affected by `schema_prefix`, `schema_suffix` and `schema_override` as specified in [database objects](../database_objects.md).
* `table`: The final compiled value is affected by `table_prefix`, `table_suffix` and `table_override` as specified in [database objects](../database_objects.md).
* `db`: the (optional) destination database.
* `delete_key`: specifies the incremental process delete key. This is for `incremental` `materialisation` only.
!!! info
By default the task is executed in the database defined by `default_db` in `project.yaml`. `db` can be specified to change this, in which case the connection specified needs to:
* Be a credential from the `required_credentials` list in `project.yaml`.
* Be defined in your `settings.yaml`.
* Be one of the supported [databases](../databases/overview.md).
## Setting Dependencies With `sql`
With `sql` tasks, you should use the `src` and `out` macro in your `SELECT` statements to implicitly create task dependencies.
!!! example "src in sql query"
```
SELECT field1
, field2
FROM {{ src('my_table') }} l
```
!!! example "out in sql query"
```
CREATE OR REPLACE {{ out('my_table') }} AS
(
SELECT field1
, field2
FROM {{ src('my_other_table') }} l
}
```
By using the `{{ src('my_table') }}` in your `FROM` clause, you are effectively telling SAYN that your task depends on the `my_table` table (or view). As a result, SAYN will look for the task that produces `my_table` and set it as a parent of this `sql` task automatically.
Similarly, by using `{{ out('table') }}` anywhere in the script you can retrieve the full name of the table to be created. In this way, you also tell SAYN the output of the SQL script.
!!! info
The `out` macro is only applicable to the `script` materialisation, as in the other cases you won't need to access the output table as SAYN handles table creation for you.
!!! tip
When using the `src` macro, you can pass a structure formatted as `schema.table` such as `{{ src('my_schema.my_table') }}`. In this case, SAYN interprets the first element as the schema, the second element as the table or view. If you use `schema_prefix` and / or `table_prefix` in your project settings, SAYN will then prepend the `schema_prefix` to the `schema` value and `table_prefix` to the `table` value. For example, if your `schema_prefix` is set to `analytics` and `table_prefix` to `up` then `{{ src('my_schema.my_table') }}` will compile `analytics_my_schema.up_my_table`.
## Advanced Configuration
If you need to amend the configuration (e.g. materialisation) of a specific `sql` task within a `group`, you can overload the values specified in the YAML group definition. To do this, we simply call `config` from a Jinja tag within the sql file of the task:
!!! example "sql with config"
```
{{ config(materialisation='view') }}
SELECT ...
```
The above code will override the value of `materialisation` setting defined in YAML to make this model a view. All other parameters
described above in this page are also available to overload with `config` except `file_name` and `name`. Other
properties are available for overloading for advanced use cases: `parents`, `outputs` and `sources`.
## Using `sql` In `incremental` Mode
`sql` tasks support loads incrementally, which is extremely useful for large data volumes when full
refresh (`materialisation: table`) would be infeasible.
We set an `sql` task as incremental by:
1. Setting `materialisation` to `incremental`
2. Defining a `delete_key`
!!! example "sql in incremental mode"
```yaml
...
task_sql_incremental:
type: sql
file_name: task_sql_incremental.sql
materialisation: incremental
destination:
tmp_schema: analytics_staging
schema: analytics_models
table: task_sql
delete_key: dt
...
```
When using `incremental`, SAYN will do the following in the background:
1. Create a temporary table based on the incremental logic from the SAYN query.
2. Delete from the final table those records for which the `delete_key` value is in the temporary table.
3. Insert the contents of the temporary table into the final table.
In order to make the `SELECT` statement incremental, SAYN provides the following arguments:
* `full_load`: a flag defaulting to `False` and controlled by the `-f` flag in the SAYN command.
If `-f` is passed to the sayn command, the final table will be replaced with the temporary one
in step 2 above, rather than performing a merge of the data.
* `start_dt`: a date defaulting to "yesterday" and controlled by the `-s` flag in the SAYN command.
* `end_dt`: a date defaulting to "yesterday" and controlled by the `-e` flag in the SAYN command.
!!! example "SQL using incremental arguments"
```sql
SELECT dt
, field2
, COUNT(1) AS c
FROM table
WHERE dt BETWEEN {{ start_dt }} AND {{ end_dt }}
GROUP BY 1,2
```
## Defining columns
Sql tasks accept a `columns` field in the task definition that affects the table creation by enforcing types and column order.
!!! attention
Each supported database might have specific DDL related to it. Below are the DDLs that SAYN supports across all databases. For DDLs related to specific databases see the database-specific pages.
### CREATE TABLE DDLs
SAYN also lets you control the CREATE TABLE statement if you need more specification. This is done with:
* columns: the list of columns including their definitions.
* table_properties: database specific properties that affect table creation (indexes, cluster, sorting, etc.).
* post_hook: SQL statements executed right after the table/view creation.
`columns` can define the following attributes:
* name: the column name.
* type: the column type.
* tests: list of keywords that constraint a specific column
- unique: enforces a unique constraint on the column.
- not_null: enforces a non null constraint on the column.
- allowed_values: list allowed values for the column.
`table_properties` can define the following attributes (database specific):
* indexes: specify the indexing for the table.
* sorting: specify the sorting for the table.
* distribution_key: specify the type of distribution.
* partitioning: specify the partitioning model for the table.
* clustering: specify the clustering for the table.
!!! attention
Each supported database might have specific `table_properties` related to it; see the database-specific pages for further details and examples.
!!! Attention
If the a primary key is defined in both the `columns` and `indexes` DDL entries, the primary key will be set as part of the `CREATE TABLE` statement only.
!!! example "sql with columns"
```yaml
...
task_sql:
type: sql
file_name: task_sql.sql
materialisation: table
destination:
tmp_schema: analytics_staging
schema: analytics_models
table: task_sql
ddl:
columns:
- name: x
type: int
primary: True
- name: y
type: varchar
unique: True
permissions:
role_name: SELECT
...
```
|
/sayn-0.6.11-py3-none-any.whl/docs/tasks/sql.md
| 0.937117 | 0.962813 |
sql.md
|
pypi
|
# SAYN Project Example: Reddit News NLP
## Project Description
#### Overview
This is an example SAYN project which shows how to use SAYN for data modelling and processing. You can find the GitHub repository
[here](https://github.com/173TECH/sayn_project_example_nlp_news_scraping){target="\_blank"}.
This project does the following:
* Extracts article data from Reddit RSS feeds
* Loads it into a SQLite database
* Cleans the extracted data
* Performs some basic text analysis on the transformed data
#### Features Used
* [Python tasks](../tasks/python.md) to extract and analyse data
* [Autosql tasks](../tasks/autosql.md) to automate SQL transformations.
* Usage of [parameters](../parameters.md) to make the code dynamic.
* Usage of [presets](../presets.md) to define tasks.
In addition to SAYN, this project uses the following packages:
* RSS feed data extraction: `feedparser`
* Data processing: `numpy`, `pandas`, `nltk`
* Visualisations: `matplotlib`, `wordcloud`, `pillow`
#### Running The Project
* Clone the repository with the command `git clone https://github.com/173TECH/sayn_project_example_nlp_news_scraping`.
* Rename the `sample_settings.yaml` file to `settings.yaml`.
* Install the project dependencies by running the `pip install -r requirements.txt` command from the root of the project folder.
* Run all SAYN commands from the root of the project folder.
<br>
## Implementation Details
### Step 1: Extract Task Group
Quick Summary:
* Create the task group `extract.yaml`
* Create a [python task](../tasks/python.md) to extract and load the data
<br>
#### Task Details (`load_data`)
First, we need to define our `extract` group in our tasks folder. This group will only include the `load_data` task. This is quite a simple [python task](../tasks/python.md) which will use the `LoadData` class from `load_data.py` which we will create later.
Our `load_data` task will have two [parameters](../parameters.md):
* `table`: name of the table we plan to create in our database
* `links`: list of links to rss feeds
??? example "tasks/extract.yaml"
```yaml
tasks:
load_data:
type: python
class: load_data.LoadData
parameters:
table: logs_reddit_feeds
links:
- https://www.reddit.com/r/USnews/new/.rss
- https://www.reddit.com/r/UKnews/new/.rss
- https://www.reddit.com/r/EUnews/new/.rss
```
???+ note
Parameters are not a requirement, however parameters make the code dynamic which is useful for reusability.
The `load_data` task will have the following steps:
* `Appending Reddit data to dataframe`: loops through the links array, appends data from each link to a dataframe
* `Updating database`: loads dataframe into SQLite database using `pandas.to_sql` method
###### LoadData Class
Next, we will create our `LoadData` class.
Our LoadData inherits properties from SAYN's PythonTask, in addition it will have 3 methods:
* `fetch_reddit_data`: fetches data from the Reddit RSS feeds
* `setup`: sets the order of steps to run
* `run`: defines what each step does during the run
???+ attention
`fetch_reddit_data` is a utility method for this task, while `setup` and `run` are the usual SAYN methods. Please note that methods `setup` and `run` need to return either `self.success()` or `self.fail()` in order to run.
###### Utility Method (`fetch_reddit_data`)
The `fetch_reddit_data` function uses the `feedparser.parse` method to fetch the raw data from the rss feed link. It then converts the data into a `pandas dataframe` to make it easier to work with.
The function also extracts the source of each article and adds it under the `source` column.
??? example "python/load_data.py"
``` python
import pandas as pd
import feedparser as f
from sayn import PythonTask
class LoadData(PythonTask):
def fetch_reddit_data(self, link):
"""Parse and label RSS Reddit data then return it in a pandas DataFrame"""
# get data from supplied link
raw_data = f.parse(link)
# transform data to dataframe
data = pd.DataFrame(raw_data.entries)
# select columns of interest
data = data.loc[:, ["id", "link", "updated", "published", "title"]]
# get the source, only works for Reddit RSS feeds
source_elements = link.split("/")
data["source"] = source_elements[4] + "_" + source_elements[5]
return data
def setup(self):
self.set_run_steps(["Appending Reddit data to dataframe", "Updating database"])
return self.success()
def run(self):
with self.step("Appending Reddit data to dataframe"):
links = self.parameters["links"]
table = self.parameters["user_prefix"] + self.task_parameters["table"]
df = pd.DataFrame()
for link in links:
temp_df = self.fetch_reddit_data(link)
n_rows = len(temp_df)
df = df.append(temp_df)
self.info(f"Loading {n_rows} rows into destination: {table}....")
with self.step("Updating database"):
if df is not None:
df.to_sql(
table, self.default_db.engine, if_exists="append", index=False
)
return self.success()
```
??? tip
`self.parameters["user_prefix"]` is set dynamically based on what you set it to in project.yaml, this can also be overwritten in settings.yaml
### Step 2: Modelling Group
Quick Summary:
* Create the SQL query `dim_reddit_feeds.sql` to filter out duplicates
* Create a modelling [preset](../presets.md) in `project.yaml`
* Create the task group `modelling.yaml`
<br>
#### Task Details (`dim_reddit_feeds`)
Currently our `load_data` task appends data to our database but it does not filter out any potential duplicates that we might encounter after multiple runs. This is where the `modelling` group comes in, we can define an [AutoSQL task](../tasks/autosql.md) to filter out any duplicates.
First, we need to create a sql query in our `sql` folder that will filter out any duplicates; we will call it `dim_reddit_feeds.sql`
??? example "sql/dim_reddit_feeds.sql"
```sql
SELECT DISTINCT id
, title
, published
, updated
, link
, source
FROM {{user_prefix}}logs_reddit_feeds
```
??? tip
`{{user_prefix}}` is set dynamically. The default value is set in `project.yaml`. This can be overwritten using profiles in `settings.yaml`.
Next, we will define a modelling [preset](../presets.md) in `project.yaml`. [Presets](../presets.md) enable you to create a task prototype which can be reused when defining tasks. Hence, the modelling [preset](../presets.md) will simplify the code in `modelling.yaml` while also allowing us to set dynamic file and table names.
???+ attention
Presets defined in `project.yaml` are project level presets, you can also define presets within individual task groups.
??? example "project.yaml"
```yaml
required_credentials:
- warehouse
default_db: warehouse
presets:
modelling:
type: autosql
materialisation: table
file_name: "{{ task.name }}.sql"
destination:
table: "{{ user_prefix }}{{ task.name }}"
parameters:
user_prefix:
```
??? tip
`{{ task.name }}` returns the name of task
Now that we have the modelling [preset](../presets.md), we can use it in the `modelling` group. Since we want `dim_reddit_feeds` to run after our `load_data` task, we will need to set the parents of the task to `load_data`.
??? example "tasks/modelling.yaml"
```yaml
tasks:
dim_reddit_feeds:
preset: modelling
parents:
- load_data
```
### Step 3: Data Science Group
Quick Summary:
* Create the task group `data_science.yaml`
* Create the [python task](../tasks/python.md) `wordcloud` to generate wordclouds
* Create the [python task](../tasks/python.md) `nlp` to generate text statistics
* Create the [AutoSQL task](../tasks/autosql.md) `dim_reddit_feeds_nlp_stats` to calculate aggregate statistics grouped by source
<br>
#### Group Overview
Now that we have our cleaned dataset, we can utilise [python tasks](../tasks/python.md) to do some natural language processing on our text data. In particular, we will use two libraries for this analysis:
* `nltk`: for basic text statistics
* `wordcloud`: for generating wordcloud visualisations
First, we need to create the `data_science` group in the `tasks` folder. There will be two tasks within this group:
* `nlp`: generates the text statistics
* `wordcloud`: generates the wordclouds
Both tasks will use data from our `dim_reddit_feeds` table, therefore we will need to set their their table [parameters](../parameters.md) to `dim_reddit_feeds`. Since both of these tasks are children of the `dim_reddit_feeds` task, we will also need to set their parents attributes to `dim_reddit_feeds`.
The `wordcloud` task has a `stopwords` parameter, this parameter provides additional context related stopwords.
??? example "tasks/data_science.yaml"
```yaml
tasks:
nlp:
type: python
class: nlp.LanguageProcessing
parents:
- dim_reddit_feeds
parameters:
table: dim_reddit_feeds
wordcloud:
type: python
class: wordcloud.RenderCloud
parents:
- dim_reddit_feeds
parameters:
table: dim_reddit_feeds
stopwords:
- Reddit
```
#### Task Details (`wordcloud`)
The `wordcloud` task will have the following steps:
* `Grouping texts`: aggregates article titles and groups them by source
* `Generating clouds`: generates a wordcloud for each source, as well as the full dataset
###### RenderCloud Class
Next, we can define the class `RenderCloud` for the `wordcloud` task. `RenderCloud` has 3 methods:
* `word_cloud`: generates a wordcloud visualisation
* `setup`: sets the order of steps to run
* `run`: defines what each step does during the run
???+ attention
`word_cloud` is a utility method for this task, while `setup` and `run` are the usual SAYN methods. Please note that methods `setup` and `run` need to return either `self.success()` or `self.fail()` in order to run.
??? example "python/wordcloud.py"
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sayn import PythonTask
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
class RenderCloud(PythonTask):
def word_cloud(
self, name, text, stopwords, b_colour="white", c_colour="black", show=False
):
"""Word cloud generating function"""
# attempt to find a compatible mask
try:
mask = np.array(Image.open(f"python/img/masks/{name}_mask.png"))
image_colours = ImageColorGenerator(mask)
except:
mask = None
image_colours = None
wordcloud = WordCloud(
stopwords=stopwords,
max_words=100,
mask=mask,
background_color=b_colour,
contour_width=1,
contour_color=c_colour,
color_func=image_colours,
).generate(text)
# store wordcloud image in "python/img"
wordcloud.to_file(f"python/img/{name}_wordcloud.png")
# declare show=True if you want to show wordclouds
if show:
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
def setup(self):
self.set_run_steps(["Grouping texts", "Generating clouds"])
return self.success()
def run(self):
with self.step("Grouping texts"):
table = self.parameters["user_prefix"] + self.task_parameters["table"]
df = pd.DataFrame(self.default_db.read_data(f"SELECT * FROM {table}"))
full_text = " ".join(article for article in df.title)
sources = df.groupby("source")
grouped_texts = sources.title.sum()
with self.step("Generating clouds"):
stopwords = STOPWORDS.update(self.parameters["stopwords"])
self.info("Generating reddit_wordcloud.png")
self.word_cloud("reddit", full_text, stopwords)
# Source specific wordclouds
for group, text in zip(grouped_texts.keys(), grouped_texts):
self.info(f"Generating {group}_wordcloud.png")
self.word_cloud(
group, text, stopwords, b_colour="black", c_colour="white"
)
return self.success()
```
#### Task Details (`nlp`)
The `nlp` task will have the following steps:
* `Processing texts`: generates text statistics for each title
* `Updating database`: similar to LoadData step, has additional debugging information
###### LanguageProcessing Class
Moving on, we can define the class `LanguageProcessing` for the `nlp` task. `LanguageProcessing` has 3 methods:
* `desc_text`: provides counts of letters, words and sentences in an article
* `setup`: sets the order of steps to run
* `run`: defines what each step does during the run
???+ attention
`desc_text` is a utility method for this task, while `setup` and `run` are the usual SAYN methods. Please note that methods `setup` and `run` need to return either `self.success()` or `self.fail()` in order to run.
??? example "python/nlp.py"
```python
import pandas as pd
from sayn import PythonTask
from nltk import download
from nltk.tokenize import word_tokenize, sent_tokenize
download("punkt")
class LanguageProcessing(PythonTask):
def desc_text(self, df, text_field, language):
"""Text stats generating function"""
# counts the number of letters in text_field
df[text_field + "_letters"] = df[text_field].fillna("").str.len()
# counts the number of words in text_field
df[text_field + "_words"] = (
df[text_field]
.fillna("")
.apply(lambda x: len(word_tokenize(x, language=language)))
)
# counts the number of sentences in text_field
df[text_field + "_sentences"] = (
df[text_field]
.fillna("")
.apply(lambda x: len(sent_tokenize(x, language=language)))
)
def setup(self):
self.set_run_steps(["Processing texts", "Updating database"])
return self.success()
def run(self):
with self.step("Processing texts"):
table = self.parameters["user_prefix"] + self.task_parameters["table"]
df = pd.DataFrame(self.default_db.read_data(f"SELECT * FROM {table}"))
self.info(f"Processing texts for title field")
self.desc_text(df, "title", "english")
with self.step("Updating database"):
if df is not None:
output = f"{table}_{self.name}"
n_rows = len(df)
self.info(f"Loading {n_rows} rows into destination: {output}....")
df.to_sql(
output, self.default_db.engine, if_exists="replace", index=False
)
return self.success()
```
#### Task Details (`dim_reddit_feeds_nlp_stats`)
Now that we have individual article statistics, it would be a good idea to create an additional modelling task to find some aggregate statistics grouped by source. Let's create another SQL query called `dim_reddit_feeds_nlp_stats` in the `sql` folder. This query will give us the average, grouped by source, of the text statistics generated by the `nlp` task.
??? example "sql/dim_reddit_feeds_nlp_stats.py"
```sql
SELECT source
, AVG(title_letters) AS average_letters
, AVG(title_words) AS average_words
, AVG(title_sentences) AS average_sentences
FROM {{user_prefix}}dim_reddit_feeds_nlp
GROUP BY 1
ORDER BY 1
```
Finally, we can add the `dim_reddit_feeds_nlp_stats` task to the the `modelling` group. Like the previous modelling task, we will create this task using the modelling [preset](../presets.md) in `project.yaml`; setting the parents parameter to `nlp`. We want to materialise this query as a view; therefore, we will need to overwrite the materialisation parameter of the preset.
??? example "modelling.yaml"
```yaml
tasks:
dim_reddit_feeds:
preset: modelling
parents:
- load_data
dim_reddit_feeds_nlp_stats:
preset: modelling
materialisation: view
parents:
- nlp
```
#### Step 4: Run the project
All that's left is to run the project in the command line. Change your directory to this project's folder and enter `sayn run`.
???+ attention
Please note that if you did not clone the git repo, you may have some issues with the wordcloud generation. We recommend you create a folder called `img` within the `python` folder, if you do not already have one.
|
/sayn-0.6.11-py3-none-any.whl/docs/project_examples/reddit_news_nlp.md
| 0.896399 | 0.987338 |
reddit_news_nlp.md
|
pypi
|
# SAYN Project Example: BBC News NLP
## Project Description
#### Overview
This is an example SAYN project which shows how to use SAYN for data modelling and processing. You can find the GitHub repository
[here](https://github.com/173TECH/sayn_project_example_nlp_news_scraping){target="\_blank"}.
This project does the following:
* Extracts article data from BBC RSS feeds
* Loads it into a SQLite database
* Cleans the extracted data
* Performs some basic text analysis on the transformed data
#### Features Used
* [Python tasks](../tasks/python.md) to extract and analyse data
* [Autosql tasks](../tasks/autosql.md) to automate SQL transformations.
* Usage of [parameters](../parameters.md) to make the code dynamic.
* Usage of [presets](../presets.md) to define tasks.
In addition to SAYN, this project uses the following packages:
* RSS feed data extraction: `feedparser`
* Data processing: `numpy`, `pandas`, `nltk`
* Visualisations: `matplotlib`, `wordcloud`, `pillow`
#### Running The Project
* Clone the repository with the command `git clone https://github.com/173TECH/sayn_project_example_nlp_news_scraping`.
* Rename the `sample_settings.yaml` file to `settings.yaml`.
* Install the project dependencies by running the `pip install -r requirements.txt` command from the root of the project folder.
* Run all SAYN commands from the root of the project folder.
<br>
## Implementation Details
### Step 1: Extract Task Group
Quick Summary:
* Create the task group `extract.yaml`
* Create a [python task](../tasks/python.md) to extract and load the data
<br>
#### Task Details (`load_data`)
First, we need to define our `extract` group in our tasks folder. This group will only include the `load_data` task. This is quite a simple [python task](../tasks/python.md) which will use the `LoadData` class from `load_data.py` which we will create later.
Our `load_data` task will have two [parameters](../parameters.md):
* `table`: name of the table we plan to create in our database
* `links`: list of links to rss feeds
??? example "tasks/extract.yaml"
```yaml
tasks:
load_data:
type: python
class: load_data.LoadData
parameters:
table: logs_bbc_feeds
links:
- http://feeds.bbci.co.uk/news/england/rss.xml
- http://feeds.bbci.co.uk/news/wales/rss.xml
- http://feeds.bbci.co.uk/news/scotland/rss.xml
- http://feeds.bbci.co.uk/news/northern_ireland/rss.xml
- http://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml
- http://feeds.bbci.co.uk/news/world/middle_east/rss.xml
- http://feeds.bbci.co.uk/news/world/latin_america/rss.xml
- http://feeds.bbci.co.uk/news/world/europe/rss.xml
- http://feeds.bbci.co.uk/news/world/asia/rss.xml
- http://feeds.bbci.co.uk/news/world/africa/rss.xml
```
???+ note
Parameters are not a requirement, however parameters make the code dynamic which is useful for reusability.
The `load_data` task will have the following steps:
* `Appending BBC data to dataframe`: loops through the links array, appends data from each link to a dataframe
* `Updating database`: loads dataframe into SQLite database using `pandas.to_sql` method
###### LoadData Class
Next, we will create our `LoadData` class.
Our LoadData inherits properties from SAYN's PythonTask, in addition it will have 3 methods:
* `fetch_bbc_data`: fetches data from the BBC RSS feeds
* `setup`: sets the order of steps to run
* `run`: defines what each step does during the run
???+ attention
`fetch_bbc_data` is a utility method for this task, while `setup` and `run` are the usual SAYN methods. Please note that methods `setup` and `run` need to return either `self.success()` or `self.fail()` in order to run.
###### Utility Method (`fetch_bbc_data`)
The `fetch_bbc_data` function uses the `feedparser.parse` method to fetch the raw data from the rss feed link. It then converts the data into a `pandas dataframe` to make it easier to work with.
The function also extracts the source of each article and adds it under the `source` column.
Lastly, the function assigns a `unique_id` to each article which is based on its article id and the source it originates from. This is because the same article may be published in multiple sources with the same id, which means our original ids are not unique and could be misleading.
??? example "python/load_data.py"
``` python
import pandas as pd
import feedparser as f
from sayn import PythonTask
class LoadData(PythonTask):
def fetch_bbc_data(self, link):
"""Parse and label RSS BBC News data then return it in a pandas DataFrame"""
# get data from supplied link
raw_data = f.parse(link)
# transform data to dataframe
data = pd.DataFrame(raw_data.entries)
# remove incompatible columns
data.drop(
["title_detail", "summary_detail", "links", "published_parsed"],
axis=1,
inplace=True,
)
# get the source (this only works for BBC RSS feeds)
data["source"] = link[29:-8].replace("/", "_")
# generating ids to be unique, since same story ids can be published in different sources
data["unique_id"] = data["id"] + data["source"]
return data
def setup(self):
self.set_run_steps(["Appending BBC data to dataframe", "Updating database"])
return self.success()
def run(self):
with self.step("Appending BBC data to dataframe"):
links = self.parameters["links"]
table = self.parameters["user_prefix"] + self.task_parameters["table"]
df = pd.DataFrame()
for link in links:
temp_df = self.fetch_bbc_data(link)
n_rows = len(temp_df)
df = df.append(temp_df)
self.info(f"Loading {n_rows} rows into destination: {table}....")
with self.step("Updating database"):
if df is not None:
df.to_sql(
table, self.default_db.engine, if_exists="append", index=False
)
return self.success()
```
??? tip
`self.parameters["user_prefix"]` is set dynamically based on what you set it to in project.yaml, this can also be overwritten in settings.yaml
### Step 2: Modelling Group
Quick Summary:
* Create the SQL query `dim_bbc_feeds.sql` to filter out duplicates
* Create a modelling [preset](../presets.md) in `project.yaml`
* Create the task group `modelling.yaml`
<br>
#### Task Details (`dim_bbc_feeds`)
Currently our `load_data` task appends data to our database but it does not filter out any potential duplicates that we might encounter after multiple runs. This is where the `modelling` group comes in, we can define an [AutoSQL task](../tasks/autosql.md) to filter out any duplicates.
First, we need to create a sql query in our `sql` folder that will filter out any duplicates; we will call it `dim_bbc_feeds.sql`
??? example "sql/dim_bbc_feeds.sql"
```sql
SELECT DISTINCT unique_id
, id
, title
, summary
, link
, guidislink
, published
, source
FROM {{user_prefix}}logs_bbc_feeds
```
??? tip
`{{user_prefix}}` is set dynamically. The default value is set in `project.yaml`. This can be overwritten using profiles in `settings.yaml`.
Next, we will define a modelling [preset](../presets.md) in `project.yaml`. [Presets](../presets.md) enable you to create a task prototype which can be reused when defining tasks. Hence, the modelling [preset](../presets.md) will simplify the code in `modelling.yaml` while also allowing us to set dynamic file and table names.
???+ attention
Presets defined in `project.yaml` are project level presets, you can also define presets within individual task groups.
??? example "project.yaml"
```yaml
required_credentials:
- warehouse
default_db: warehouse
presets:
modelling:
type: autosql
materialisation: table
file_name: "{{ task.name }}.sql"
destination:
table: "{{ user_prefix }}{{ task.name }}"
parameters:
user_prefix:
```
??? tip
`{{ task.name }}` returns the name of task
Now that we have the modelling [preset](../presets.md), we can use it in the `modelling` group. Since we want `dim_bbc_feeds` to run after our `load_data` task, we will need to set the parents of the task to `load_data`.
??? example "tasks/modelling.yaml"
```yaml
tasks:
dim_bbc_feeds:
preset: modelling
parents:
- load_data
```
### Step 3: Data Science Group
Quick Summary:
* Create the task group `data_science.yaml`
* Create the [python task](../tasks/python.md) `wordcloud` to generate wordclouds
* Create the [python task](../tasks/python.md) `nlp` to generate text statistics
* Create the [AutoSQL task](../tasks/autosql.md) `dim_bbc_feeds_nlp_stats` to calculate aggregate statistics grouped by source
<br>
#### Group Overview
Now that we have our cleaned dataset, we can utilise [python tasks](../tasks/python.md) to do some natural language processing on our text data. In particular, we will use two libraries for this analysis:
* `nltk`: for basic text statistics
* `wordcloud`: for generating wordcloud visualisations
First, we need to create the `data_science` group in the `tasks` folder. There will be two tasks within this group:
* `nlp`: generates the text statistics
* `wordcloud`: generates the wordclouds
Both tasks will use data from our `dim_bbc_feeds` table, therefore we will need to set their their table [parameters](../parameters.md) to `dim_bbc_feeds`. Since both of these tasks are children of the `dim_bbc_feeds` task, we will also need to set their parents attributes to `dim_bbc_feeds`.
The `nlp` task has a `text` parameter, this parameter specifies which columns have text for processing.
The `wordcloud` task has a `stopwords` parameter, this parameter provides additional context related stopwords (e.g. "say" and its variations seem to be very common in summaries, however they are not very informative).
??? example "tasks/data_science.yaml"
```yaml
tasks:
nlp:
type: python
class: nlp.LanguageProcessing
parents:
- dim_bbc_feeds
parameters:
table: dim_bbc_feeds
text:
- title
- summary
wordcloud:
type: python
class: wordcloud.RenderCloud
parents:
- dim_bbc_feeds
parameters:
table: dim_bbc_feeds
stopwords:
- say
- said
- says
- will
- country
- US
- England
- Scotland
- Wales
- NI
- Ireland
- Europe
- BBC
- yn
```
#### Task Details (`wordcloud`)
The `wordcloud` task will have the following steps:
* `Grouping texts`: aggregates article summaries and groups them by source (summaries are used instead of titles since they tend to be longer)
* `Generating clouds`: generates a wordcloud for each source, as well as the full dataset
###### RenderCloud Class
Next, we can define the class `RenderCloud` for the `wordcloud` task. `RenderCloud` has 3 methods:
* `word_cloud`: generates a wordcloud visualisation
* `setup`: sets the order of steps to run
* `run`: defines what each step does during the run
???+ attention
`word_cloud` is a utility method for this task, while `setup` and `run` are the usual SAYN methods. Please note that methods `setup` and `run` need to return either `self.success()` or `self.fail()` in order to run.
??? example "python/wordcloud.py"
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sayn import PythonTask
from PIL import Image
from wordcloud import WordCloud, STOPWORDS
class RenderCloud(PythonTask):
def word_cloud(
self, name, text, stopwords, b_colour="white", c_colour="firebrick", show=False
):
"""Word cloud generating function"""
# attempt to find a compatible mask
try:
mask = np.array(Image.open(f"python/img/masks/{name}_mask.png"))
except:
mask = None
wordcloud = WordCloud(
stopwords=stopwords,
max_words=100,
mask=mask,
background_color=b_colour,
contour_width=1,
contour_color=c_colour,
).generate(text)
# store wordcloud image in "python/img"
wordcloud.to_file(f"python/img/{name}_wordcloud.png")
# declare show=True if you want to show wordclouds
if show:
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
def setup(self):
self.set_run_steps(["Grouping texts", "Generating clouds"])
return self.success()
def run(self):
with self.step("Grouping texts"):
table = self.parameters["user_prefix"] + self.task_parameters["table"]
df = pd.DataFrame(self.default_db.read_data(f"SELECT * FROM {table}"))
full_text = " ".join(article for article in df.summary)
sources = df.groupby("source")
grouped_texts = sources.summary.sum()
with self.step("Generating clouds"):
stopwords = STOPWORDS.update(self.parameters["stopwords"])
self.info("Generating bbc_wordcloud.png")
self.word_cloud(
"bbc", full_text, stopwords, b_colour="white", c_colour="black"
)
# Source specific wordclouds
for group, text in zip(grouped_texts.keys(), grouped_texts):
self.info(f"Generating {group}_wordcloud.png")
self.word_cloud(group, text, stopwords)
return self.success()
```
#### Task Details (`nlp`)
The `nlp` task will have the following steps:
* `Processing texts`: loops through text_fields, generates text statistics on each entry
* `Updating database`: similar to LoadData step, has additional debugging information
###### LanguageProcessing Class
Moving on, we can define the class `LanguageProcessing` for the `nlp` task. `LanguageProcessing` has 3 methods:
* `desc_text`: provides counts of letters, words and sentences in an article
* `setup`: sets the order of steps to run
* `run`: defines what each step does during the run
???+ attention
`desc_text` is a utility method for this task, while `setup` and `run` are the usual SAYN methods. Please note that methods `setup` and `run` need to return either `self.success()` or `self.fail()` in order to run.
??? example "python/nlp.py"
```python
import pandas as pd
from sayn import PythonTask
from nltk import download
from nltk.tokenize import word_tokenize, sent_tokenize
download("punkt")
class LanguageProcessing(PythonTask):
def desc_text(self, df, text_field, language):
"""Text stats generating function"""
# counts the number of letters in text_field
df[text_field + "_letters"] = df[text_field].fillna("").str.len()
# counts the number of words in text_field
df[text_field + "_words"] = (
df[text_field]
.fillna("")
.apply(lambda x: len(word_tokenize(x, language=language)))
)
# counts the number of sentences in text_field
df[text_field + "_sentences"] = (
df[text_field]
.fillna("")
.apply(lambda x: len(sent_tokenize(x, language=language)))
)
def setup(self):
self.set_run_steps(["Processing texts", "Updating database"])
return self.success()
def run(self):
with self.step("Processing texts"):
table = self.parameters["user_prefix"] + self.task_parameters["table"]
text_fields = self.parameters["text"]
df = pd.DataFrame(self.default_db.read_data(f"SELECT * FROM {table}"))
for t in text_fields:
self.info(f"Processing texts for {t} field")
self.desc_text(df, t, "english")
with self.step("Updating database"):
if df is not None:
output = f"{table}_{self.name}"
n_rows = len(df)
self.info(f"Loading {n_rows} rows into destination: {output}....")
df.to_sql(
output, self.default_db.engine, if_exists="replace", index=False
)
return self.success()
```
#### Task Details (`dim_bbc_feeds_nlp_stats`)
Now that we have individual article statistics, it would be a good idea to create an additional modelling task to find some aggregate statistics grouped by source. Let's create another SQL query called `dim_bbc_feeds_nlp_stats` in the `sql` folder. This query will give us the average, grouped by source, of the text statistics generated by the `nlp` task.
??? example "sql/dim_bbc_feeds_nlp_stats.py"
```sql
SELECT source
, AVG(title_letters) AS average_tl
, AVG(title_words) AS average_tw
, AVG(title_sentences) AS average_ts
, AVG(summary_letters) AS average_sl
, AVG(summary_words) AS average_sw
, AVG(summary_sentences) AS average_ss
FROM {{user_prefix}}dim_bbc_feeds_nlp
GROUP BY 1
ORDER BY 1
```
Finally, we can add the `dim_bbc_feeds_nlp_stats` task to the the `modelling` group. Like the previous modelling task, we will create this task using the modelling [preset](../presets.md) in `project.yaml`; setting the parents parameter to `nlp`. We want to materialise this query as a view; therefore, we will need to overwrite the materialisation parameter of the preset.
??? example "modelling.yaml"
```yaml
tasks:
dim_bbc_feeds:
preset: modelling
parents:
- load_data
dim_bbc_feeds_nlp_stats:
preset: modelling
materialisation: view
parents:
- nlp
```
#### Step 4: Run the project
All that's left is to run the project in the command line. Change your directory to this project's folder and enter `sayn run`.
???+ attention
Please note that if you did not clone the git repo, you may have some issues with the wordcloud generation. We recommend you create a folder called `img` within the `python` folder, if you do not already have one.
|
/sayn-0.6.11-py3-none-any.whl/docs/project_examples/bbc_news_nlp.md
| 0.92361 | 0.985909 |
bbc_news_nlp.md
|
pypi
|
# Tutorial Part 3: Using Python With SAYN
The [previous section of this tutorial](tutorial_part2.md) showed you how to use SAYN for data modelling purposes. We will now show you how to use Python with SAYN. This will therefore enable you to write end-to-end ELT processes and data science tasks with SAYN. Let's dive in!
## Adding Your Python Task Group
As we did for the `autosql` tasks, we will need to add a task group for our Python tasks. To do so, add the following to your `project.yaml`:
!!! example "project.yaml"
```yaml
...
groups:
models:
...
say_hello:
type: python
module: say_hello
```
This will do the following:
* Create a task group called `say_hello`.
* All tasks in this group will be of type `python`.
* All functions using the `task` decorator in the file `python/say_hello.py` will be transformed into Python tasks. This file should already exist in your `python` folder and defines one task: `say_hello`. All `python` tasks should be stored in the `python` folder where an `__init__.py` file must exist.
## Writing Your Python Tasks
### A Simple Python Task
Our tutorial project has two `python` tasks. It starts with a simple Python task that interacts with the task's context. This is the `say_hello` task in the `python/say_hello.py` file, defined as follows:
!!! example "python/say_hello.py"
```python
from sayn import task
@task
def say_hello(context):
context.info('Hello!')
```
Here are the core concepts to know for running Python tasks with SAYN:
* You should import the `task` decorator from `sayn` which you can then use to define your tasks. There is a more advanced way to define `python` tasks with classes using SAYN which you can find in the documentation related to `python` tasks.
* Use the `@task` decorator and then simply define your function. The task name will be the name of the function, in this case `say_hello`.
* We pass the `context` to our function, which can then be used in our code to access task related information and control the logger. In our case, we log `Hello!` as information.
### Creating Data Logs
The second task in the `python/load_data.py` module actually does something more interesting. It creates some random logs, which is the data you initially had in the logs table of `dev.db`. First, let's add this task to `project.yaml` by adding a task group:
!!! example "project.yaml"
```yaml
...
groups:
models:
...
logs:
type: python
module: load_data
```
Let's look at whole the code from the `python/load_data.py` file:
!!! example "python/load_data.py"
```python
import random
from uuid import uuid4
from sayn import task
@task()
def say_hello(context):
context.info('Hello!')
@task(outputs=[
'logs_arenas',
'logs_tournaments',
'logs_battles',
'logs_fighters']
)
def load_data(context, warehouse):
fighters = ["Son Goku", "John", "Lucy", "Dr. x", "Carlos", "Dr. y?"]
arenas = ["Earth Canyon", "World Edge", "Namek", "Volcanic Crater", "Underwater"]
tournaments = ["World Championships", "Tenka-ichi Budokai", "King of the Mountain"]
context.set_run_steps(
[
"Generate Dimensions",
"Generate Battles",
"Load fighters",
"Load arenas",
"Load tournaments",
"Load battles",
]
)
with context.step("Generate Dimensions"):
# Add ids to the dimensions
fighters = [
{"fighter_id": str(uuid4()), "fighter_name": val}
for id, val in enumerate(fighters)
]
arenas = [
{"arena_id": str(uuid4()), "arena_name": val}
for id, val in enumerate(arenas)
]
tournaments = [
{"tournament_id": str(uuid4()), "tournament_name": val}
for id, val in enumerate(tournaments)
]
with context.step("Generate Battles"):
battles = list()
for tournament in tournaments:
tournament_id = tournament["tournament_id"]
# Randomly select a number of battles to generate for each tournament
n_battles = random.choice([10, 20, 30])
for _ in range(n_battles):
battle_id = str(uuid4())
# Randomly choose fighters and arena
fighter1_id = random.choice(fighters)["fighter_id"]
fighter2_id = random.choice(
[f for f in fighters if f["fighter_id"] != fighter1_id]
)["fighter_id"]
arena_id = random.choice(arenas)["arena_id"]
# Pick a winner
winner_id = (
fighter1_id if random.uniform(0, 1) <= 0.5 else fighter2_id
)
battles.append(
{
"event_id": str(uuid4()),
"tournament_id": tournament_id,
"battle_id": battle_id,
"arena_id": arena_id,
"fighter1_id": fighter1_id,
"fighter2_id": fighter2_id,
"winner_id": winner_id,
}
)
data_to_load = {
"fighters": fighters,
"arenas": arenas,
"tournaments": tournaments,
"battles": battles,
}
# Load logs
for log_type, log_data in data_to_load.items():
with context.step(f"Load {log_type}"):
warehouse.load_data(f"logs_{log_type}", log_data, replace=True)
```
The second task defined in this module is the `load_data` one. It uses some further features:
* The `load_data` task produces `outputs` which are defined in the decorator. This will enable you to refer to these outputs with the `src` function in `autosql` tasks and automatically set dependencies to the `load_data` task.
* `warehouse` is also passed as a parameter to the function. This enables you to easily access the `warehouse` connection in your task. You can notably see that at the end of the script with the call to `warehouse.load_data`.
### Setting Dependencies With `load_data`
As mentioned, we now have a `python` task which produces some logs which we want our `autosql` tasks to use for the data modelling process. As a result, we should ensure that the `load_data` task is always executed first. Because our `load_data` task produces `outputs`, we can refer to these with the `src` function in `autosql` tasks and automatically create dependencies. For example, the SQL query of the `dim_arenas` task should be changed from:
!!! example "sql/dim_arenas.sql"
```sql
SELECT l.arena_id
, l.arena_name
FROM logs_arenas l
```
To:
!!! example "sql/dim_arenas.sql amended"
```sql
SELECT l.arena_id
, l.arena_name
FROM {{ src('logs_arenas') }} l
```
This will now mention that the `dim_arenas` task sources the `logs_arenas` table which is an `output` of the `load_data` task. SAYN will automatically make `load_data` a parent of the `dim_arenas` task and therefore always execute it before. You can do the same for all the other logs tables used in the other `autosql` tasks.
## What Next?
This is it for our tutorial. You should now have a good understanding of the true power of SAYN! Our documentation has more extensive details about all the SAYN core concepts:
* [Tasks](../tasks/overview.md)
* [Parameters](../parameters.md)
* [Presets](../presets.md)
* [Databases](../databases/overview.md)
* [Data Tests](../tests/overview.md)
Enjoy SAYN and happy ELT-ing! :)
|
/sayn-0.6.11-py3-none-any.whl/docs/tutorials/tutorial_part3.md
| 0.928611 | 0.951097 |
tutorial_part3.md
|
pypi
|
# Tutorial Part 2: Data Modelling With SAYN
In the [first part of the tutorial](tutorial_part1.md), we executed our first SAYN run and went through the core components of a SAYN project. We will now see how to use SAYN for data modelling, a major process of analytics warehousing.
## SQL Tasks With SAYN
SAYN can execute two main types of SQL tasks:
* `autosql`: these tasks take a `SELECT` statement and create a table or view using it. All the processes are automated by SAYN in the background.
* `sql`: these tasks take your SQL statement as is and execute it. These are not covered in this tutorial.
## Defining The Task Group
In order to execute your tasks, you will need to define a task group. This is done in the `project.yaml` file, under the `groups` entry. This is the `models` group we have defined in our project:
!!! example "project.yaml"
```yaml
...
groups:
models:
type: autosql
file_name: "{{ task.group }}/*.sql"
materialisation: table
destination:
table: "{{ task.name }}"
```
This group effectively does the following:
* It creates a task group called `models`.
* This task group is defined to be `autosql` tasks.
* Each file with a `.sql` extension in the `sql/models` folder will be turned into an `autosql` task. Do not worry about the `{{ task.group }}` notation for now, this is simple Jinja templating to dynamically pick the `models` name of the group.
* All tasks from the group will model the output as tables.
* The tables will be named as per the `task` name. This `task` name is automatically generated from your file name, excluding the `.sql` extension.
## Writing Your Models
### A Simple Model
As explained in the previous section, each file with a `.sql` extension in the `sql/models` folder will be turned into an `autosql` task following our `models` group definition.
For example, the `dim_arenas.sql` file in the `sql/models` folder will be turned into a `dim_arenas` task. This is the SQL code of this file:
!!! example "sql/dim_arenas.sql"
```sql
SELECT l.arena_id
, l.arena_name
FROM logs_arenas l
```
When executed, this task will create a table called `dim_arenas` using this SQL code. This is the `dim_arenas` you can find in the `dev.db` SQLite database at the root level of the project folder. You can execute this task only by running the command `sayn run -t dim_arenas`, where `dim_arenas` is our `task` name.
### A Model With Dependency
Now, let's have a look at a model which depends on another model. The file `f_battles.sql` is a good example and actually depends on multiple other models. This is how the SQL query is written:
!!! example "sql/f_battles.sql"
```sql
SELECT t.tournament_name
, t.tournament_name || '-' || CAST(b.battle_id AS VARCHAR) AS battle_id
, a.arena_name
, f1.fighter_name AS fighter1_name
, f2.fighter_name AS fighter2_name
, w.fighter_name AS winner_name
FROM logs_battles b
LEFT JOIN {{ src('dim_tournaments') }} t
ON b.tournament_id = t.tournament_id
LEFT JOIN {{ src('dim_arenas') }} a
ON b.arena_id = a.arena_id
LEFT JOIN {{ src('dim_fighters') }} f1
ON b.fighter1_id = f1.fighter_id
LEFT JOIN {{ src('dim_fighters') }} f2
ON b.fighter2_id = f2.fighter_id
LEFT JOIN {{ src('dim_fighters') }} w
ON b.winner_id = w.fighter_id
```
As you can see, this query uses another Jinja templating notation, the `src` function which is core to using SAYN efficiently. You pass this function the name of a table, and SAYN will automatically build the dependencies between your tasks! For example, this `f_battles` task sources the table `dim_tournaments` (amongst others) with the `src` function. As a result, SAYN will look for the task that produces this `dim_tournaments` table (which is the `dim_tournaments` task) and set this task as a parent of the `f_battles` task. Therefore, `dim_tournaments` will always execute before `f_battles`. From the above code, you can see that many tasks will be set as parents of the `f_battles` task.
### Changing Your Model Materialisation
You can easily amend the configuration of a single task when necessary with SAYN. For example, the task `f_rankings`, generated by `f_rankings.sql`, uses the following query:
!!! example "sql/f_rankings.sql"
```sql
{{ config(
materialisation='view'
)
}}
SELECT fr.fighter_name
, CAST(SUM(fr.is_winner) AS FLOAT) / COUNT(DISTINCT fr.battle_id) AS win_rate
FROM {{ src('f_fighter_results') }} fr
GROUP BY 1
ORDER BY 2 DESC
```
As you can see, this task uses the `config` function, which will in this case overwrite the materialisation of the task's output to a view instead of a table. This `config` function can be really useful when you want to overwrite some attributes of specific tasks within your group.
## What Next?
You now know how to build data models with SAYN. The [next section of this tutorial](tutorial_part3.md) will now go through how to use SAYN for Python processes. This will enable you to leverage SAYN for end-to-end ELT processes or data science tasks!
|
/sayn-0.6.11-py3-none-any.whl/docs/tutorials/tutorial_part2.md
| 0.887418 | 0.980034 |
tutorial_part2.md
|
pypi
|
# Redshift
The Redshift driver depends on [psycopg2](https://www.psycopg.org){target="\_blank"} and can be
installed with:
```bash
pip install "sayn[redshift]"
```
The [Redshift](https://aws.amazon.com/redshift/){target="\_blank"} connector looks for the following parameters:
Parameter | Description | Default
----------------- | ------------------------------------- | ---------------------------------------------
host | Host name or public IP of the cluster | Required on standard user/password connection
port | Connection port | 5439
user | User name used to connect | Required
password | Password for that user | Required on standard user/password connection
cluster_id | Cluster id as registered in AWS |
database | Database in use upon connection | Required
profile | AWS CLI profile | Optional on IAM connection (`default` if left empty)
access_key_id | AWS Access Key ID | Required on Access Key IAM connections
secret_access_key | AWS Secret Access Key | Required on Access Key IAM connections
session_token | AWS Sessions Token | Required on Access Key IAM connections
bucket | S3 Bucket | Required on S3 Copy Loads
bucket_region | S3 Bucket Region | Required if the Bucket is in a different region
For advanced configurations, SAYN will pass other parameters to `create_engine`, so check the
sqlalchemy [psycopg2](https://docs.sqlalchemy.org/en/13/dialects/postgresql.html#module-sqlalchemy.dialects.postgresql.psycopg2){target="\_blank"}
dialect for extra parameters.
## Connection Types
SAYN supports 3 connection models for Redshift: one standard user/password connection and two IAM based.
### Standard User/Password Connection
If you have a user name and password for redshift use the first model and ensure host and password
are specified.
!!! example "settings.yaml"
```yaml
credentials:
redshift-conn:
type: redshift
host: my-redshift-cluster.adhfjlasdljfd.eu-west-1.redshift.amazonaws.com
port: 5439
user: awsuser
password: 'Pas$w0rd' #use quotes to avoid conflict with special characters
dbname: models
```
### Connecting With IAM
With an IAM based connection SAYN uses the AWS API to obtain a temporary password to stablish the
connection, so only `user`, `dbname` and `cluster_id` are required. AWS CLI `profile` can optionally be provided. It will use the `default` profile if not specified.
!!! example "settings.yaml"
```yaml
credentials:
redshift-conn:
type: redshift
cluster_id: my-redshift-cluster
user: awsuser
profile: default
dbname: models
```
For this connection type to work:
* `boto3` needs to be installed in the project virtual environment `pip install boto3`.
* The AWS cli need to be [setup](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html#configuration){target="\_blank"}.
* The `user` and `dbname` still need to be specified (use the database user, not the `IAM:user`).
* `host` and `port` can be skipped and these values will be obtained using boto3's `redshift describe-clusters`.
### Connecting With Access Keys
If the AWS CLI is not configured, access_key_id, secret_access_key and session_token can be provided to allow for the IAM connection. As with the method above, `user`, `dbname` and `cluster_id` are required.
!!! example "settings.yaml"
```yaml
credentials:
redshift-conn:
type: redshift
cluster_id: my-redshift-cluster
user: awsuser
access_key_id: ###
secret_access_key: ###
session_token: ###
dbname: models
```
For this connection type to work:
* `boto3` needs to be installed in the project virtual environment `pip install boto3`.
* The `user` and `dbname` still need to be specified (use the database user, not the `IAM:user`).
* `host` and `port` can be skipped and these values will be obtained using boto3's `redshift describe-clusters`.
## Redshift Specific DDL
### Indexes
Redshift doesn't support index definitions, and so autosql and copy tasks will forbid its definition
in the `ddl` entry in the task definition.
### Sorting
Table sorting can be specified under the `ddl` entry in the task definition
!!! example "tasks/base.yaml"
```yaml
tasks:
f_battles:
type: autosql
file_name: f_battles.sql
materialisation: table
destination:
table: f_battles
table_properties:
sorting:
columns:
- arena_name
- fighter1_name
```
With the above example, the table `f_battles` will be sorted by `arena_name` and `fighter1_name`
using a compound key (Redshift default). The type of sorting can be changed to interleaved.
!!! example "tasks/base.yaml"
```yaml
tasks:
f_battles:
type: autosql
file_name: f_battles.sql
materialisation: table
destination:
table: f_battles
table_properties:
sorting:
type: interleaved
columns:
- arena_name
- fighter1_name
```
For more information, read the latest docs about [SORTKEY](https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html){target="\_blank"}.
### Distribution
We can also specify the type of distribution: even, all or key based. If not specified, the Redshift default is even distribution.
!!! example "tasks/base.yaml"
```yaml
tasks:
f_battles:
type: autosql
file_name: f_battles.sql
materialisation: table
destination:
table: f_battles
table_properties:
distribution: all
```
If we want to distribute the table by a given column use the following:
!!! example "tasks/base.yaml"
```yaml
tasks:
f_battles:
type: autosql
file_name: f_battles.sql
materialisation: table
destination:
table: f_battles
table_properties:
distribution: key(tournament_name)
```
For more information, read the latest docs about
[DISTKEY](https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html){target="\_blank"}.
|
/sayn-0.6.11-py3-none-any.whl/docs/databases/redshift.md
| 0.61555 | 0.891764 |
redshift.md
|
pypi
|
# SaysWho
**SaysWho** is a Python package for identifying and attributing quotes in text. It uses a combination of logic and grammer to find quotes and their speakers, then uses a [coreferencing model](https://explosion.ai/blog/coref) to better clarify who is speaking. It's built on [Textacy](https://textacy.readthedocs.io/en/latest/) and [SpaCy](https://spacy.io/).
## Notes
- Corefencing is an experimental feature not fully integrated into SpaCy, and the current pipeline is built on SpaCy 3.4. I haven't had any problems using it with SpaCy 3.5+, but it takes some finesse to navigate the different versions.
- SaysWho grew out of a larger project for analyzing newspaper articles from Lexis between ~250 and ~2000 words, and it is optimized to navitage the syntax and common errors particular to that text.
- The output of this version is kind of open-ended, and possibly not as useful as it could be. HTML viz is coming, but I'm open to any suggestions about how this could be more useful!
## Installation
Install and update using [pip](https://pip.pypa.io/en/stable/):
```
$ pip install sayswho
```
Install the pre-trained SpaCy coreferencing pipeline.
```
$ pip install https://github.com/explosion/spacy-experimental/releases/download/v0.6.1/en_coreference_web_trf-3.4.0a2-py3-none-any.whl
```
(Optional) If you want to use the most recent version of SpaCy, you will need to update it here. (see [Notes](#notes))
```
$ pip install spacy -U
```
Download the SpaCy large english model.
```
$ spacy download en_core_web_lg
```
## A Simple Example
##### Sample text adapted from [here](https://sports.yahoo.com/nets-jacque-vaughn-looking-forward-150705556.html):
> Nets Coach Jacque Vaughn was optimistic when discussing Ben Simmons's prospects on NBA TV.
>
> “It’s been great, being able to check in with Ben," Vaughn said, via Nets Daily. “I look forward to coaching a healthy Ben Simmons. The team is excited to have him healthy, being part of our program and moving forward.
>
> "He has an innate ability to impact the basketball game on both ends of the floor. So, we missed that in the Philly series and looking forward to it.”
>
> Simmons arrived in Brooklyn during the 2021-22 season, but did not play that year after a back injury. The 26-year-old would make 42 appearances (33 starts) during a tumult-filled season for Brooklyn.
>
> “He is on the court. No setbacks," Vaughn later told reporters about Simmons' workouts. “We’ll continue to see him improve through the offseason.”
#### Instantiate `SaysWho` and run `.attribute` on target text.
```python
from sayswho import SaysWho
sw = SaysWho(text)
```
#### See speaker, cue and content of every quote with `.quotes`.
```python
print(sw.quotes)
```
```
[DQTriple(speaker=[Vaughn], cue=[said], content=“It’s been great, being able to check in with Ben,"),
DQTriple(speaker=[Vaughn], cue=[said], content=“I look forward to coaching a healthy Ben Simmons. The team is excited to have him healthy, being part of our program and moving forward."),
DQTriple(speaker=[Vaughn], cue=[told], content=“He is on the court. No setbacks,"),
DQTriple(speaker=[Vaughn], cue=[told], content=“We’ll continue to see him improve through the offseason.”)]
```
#### See resolved entity clusters with `.clusters`.
```python
print(sw.clusters)
```
```
[[Ben Simmons's,
Ben,
a healthy Ben Simmons,
him,
He,
Simmons,
The 26-year-old,
He,
Simmons'x,
him],
[Nets Coach Jacque Vaughn, Vaughn, I, Vaughn],
[Nets, The team, our, we],
[an innate ability to impact the basketball game on both ends of the floor,
that,
it],
[the 2021-22 season, that year],
[Brooklyn, Brooklyn, We]]
```
#### Use `.print_clusters()` to see unique text in each cluster, easier to read.
```python
sw.print_clusters()
```
```
0 {'Ben', 'He', 'The 26-year-old', 'a healthy Ben Simmons', "Simmons'x", "Ben Simmons's", 'Simmons', 'him'}
1 {'I', 'Nets Coach Jacque Vaughn', 'Vaughn'}
2 {'The team', 'our', 'we', 'Nets'}
3 {'it', 'an innate ability to impact the basketball game on both ends of the floor', 'that'}
4 {'that year', 'the 2021-22 season'}
5 {'Brooklyn', 'We'}
```
#### Quote/cluster matches are saved to `.quote_matches` as `namedtuples`.
```python
for qm in sw.quote_matches:
print(qm)
```
```
QuoteClusterMatch(quote_index=0, cluster_index=1)
QuoteClusterMatch(quote_index=1, cluster_index=1)
QuoteClusterMatch(quote_index=2, cluster_index=1)
QuoteClusterMatch(quote_index=3, cluster_index=1)
```
#### Use `.expand_match()` to view and interpret quote/cluster matches.
```python
sw.expand_match()
```
```
QUOTE : 0
DQTriple(speaker=[Vaughn], cue=[said], content=“It’s been great, being able to check in with Ben,")
CLUSTER : 1
['Nets Coach Jacque Vaughn', 'Vaughn']
QUOTE : 1
DQTriple(speaker=[Vaughn], cue=[said], content=“I look forward to coaching a healthy Ben Simmons. The team is excited to have him healthy, being part of our program and moving forward.")
CLUSTER : 1
['Nets Coach Jacque Vaughn', 'Vaughn']
QUOTE : 2
DQTriple(speaker=[Vaughn], cue=[told], content=“He is on the court. No setbacks,")
CLUSTER : 1
['Nets Coach Jacque Vaughn', 'Vaughn']
QUOTE : 3
DQTriple(speaker=[Vaughn], cue=[told], content=“We’ll continue to see him improve through the offseason.”)
CLUSTER : 1
['Nets Coach Jacque Vaughn', 'Vaughn']
```
#### Use `.render_to_html()` to output an HTML file with your text, highlighted quotes, and associated clusters.
```
sw.render_to_html(article_title="My Article Title")
```
|
/sayswho-0.1.4.tar.gz/sayswho-0.1.4/README.md
| 0.437944 | 0.9699 |
README.md
|
pypi
|
<center>
<a href="/"><img src="/assets/img/logo-color.png" width="100%"></img></a>
<p class="logo-caption">
<span class="red">Make</span> <span class="orange"> music</span> <span class="yellow"> with </span>
<span class="green"> Mac's</span> <code><span class="blue">say</span></code></span> <span class="purple">command</span>.
</p>
</center>
<hr></hr>
# <span class="purple" style='font-size: 16px; font-weight: bold;'>🔊 **Demos** </span>
<code><span class="rainbow-text">saysynth</span></code> sounds like this:
<iframe width="100%" height="300" scrolling="no" frameborder="no" allow="autoplay" src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/playlists/1519081741&color=%23ff5500&auto_play=true&hide_related=false&show_comments=true&show_user=true&show_reposts=false&show_teaser=true&visual=true"></iframe><div style="font-size: 10px; color: #cccccc;line-break: anywhere;word-break: normal;overflow: hidden;white-space: nowrap;text-overflow: ellipsis; font-family: Interstate,Lucida Grande,Lucida Sans Unicode,Lucida Sans,Garuda,Verdana,Tahoma,sans-serif;font-weight: 100;"><a href="https://soundcloud.com/abelsonlive" title="brian abelson" target="_blank" style="color: #cccccc; text-decoration: none;">brian abelson</a> · <a href="https://soundcloud.com/abelsonlive/sets/saysynth-demos-v100" title="saysynth demos v1.0.0" target="_blank" style="color: #cccccc; text-decoration: none;">saysynth demos v1.0.0</a></div>
You can also purchase recordings of these demos on [bandcamp](https://gltd.bandcamp.com/album/saysynth-demos-v100).
Artwork by [Jeremiah McNair](https://www.instagram.com/jeremiahjmcnair/).
# <span class="purple">🙋 **About** </span>
<hr/>
<code><span class="rainbow-text">saysynth</span></code> is a a synthesizer built on top of Apple's built-in [Speech Synthesis](https://developer.apple.com/library/archive/documentation/UserExperience/Conceptual/SpeechSynthesisProgrammingGuide/SpeechOverview/SpeechOverview.html#//apple_ref/doc/uid/TP40004365-CH3-SW6) framework, first introduced nearly 30 years ago, [when Steve Jobs demoed "Fred"](https://www.youtube.com/embed/NnsDFSXBWoM). <code><span class="rainbow-text">saysynth</span></code> provides utilities for synthesizing notes, chords, arpeggiated melodies, multi-track sequences and more!
## <span class="blue"> **☞ how it works** </span>
At some point in Fred's development, Apple decided they needed to give developers the ability to control the pitch and speaking rate of his voice. These capabilities were provided via a [domain-specific language](https://en.wikipedia.org/wiki/Domain-specific_language) (DSL) Apple created to allow users to control the duration and pitch contours of individual [phonemes](https://en.wikipedia.org/wiki/Phoneme). Eventually, this DSL was expanded to support "Alex" and "Victoria", two other built-in voices. The syntax for this DSL looks like this:
```
AA {D 120; P 176.9:0 171.4:22 161.7:61}
```
Where `AA` is a [valid phoneme](https://developer.apple.com/library/archive/documentation/UserExperience/Conceptual/SpeechSynthesisProgrammingGuide/Phonemes/Phonemes.html#//apple_ref/doc/uid/TP40004365-CH9-SW1), `D 120` is the duration of the phoneme in milliseconds, and ` P 176.9:0 171.4:22 161.7:61` represents the pitch contour for the phoneme in colon-separated pairs of frequency and percentage duration.
<code><span class="rainbow-text">saysynth</span></code> works by harnessing this DSL to create musical passages with the `say` command, mapping notes onto their associated frequencies via [`midi-utils`](https://gitlab.com/gltd/midi-utils/), generating phonemes with pitch contours (as described in [Apple's Speech Synthesis Programming Guide](https://developer.apple.com/library/archive/documentation/UserExperience/Conceptual/SpeechSynthesisProgrammingGuide/FineTuning/FineTuning.html#//apple_ref/doc/uid/TP40004365-CH5-SW7)), and spawning multiple subprocesses in Python to create polyphonic, mostly drone-oriented music. Rudimentary text-to-speech capabilities are provided by [`g2p-en`](https://pypi.org/project/g2p-en/), a library for extracting phonemes from words, though, as of now, some trial and error is necessary to get this sounding intelligible.
# <span class="purple">🛠️ **Installation** </span>
<hr/>
<code><span class="rainbow-text">saysynth</span></code> only works on Mac OS X machines with a working `say` installation. By default, the path to the executable is set to `/usr/bin/say`. You can override that path by setting the environment variable `SAYSYNTH_SAY_EXECUTABLE`.
## <span class="blue"> **☞ via pypi** </span>
First, install `python` via [homebrew](https://brew.sh/) (eg: `brew install python`)
Next, run:
```bash
pip install --user --upgrade saysynth
```
You should now be able to run `sy --help`. This command will also update a currently-installed instance of <code><span class="rainbow-text">saysynth</span></code>.
# <span class="purple">💻 **Command-Line Interface (`sy`)** </span>
<hr/>
<code><span class="rainbow-text">saysynth</span></code> is primarily designed to be used via it's command-line interface (`sy` for short).
You can view all commands (and their corresponding docs) by runnings `sy --help`:
```bash
Usage: sy [OPTIONS] COMMAND [ARGS]...
Make music with the `say` command.
Options:
--help Show this message and exit.
Commands:
chord Generate a polyphonic chord.
version Print the current version of saysynth to the console.
list List all currently running saysynth processes.
midi Synthesize a melody from a fully-monophonic midi file.
stop Stop currently running `say` processes by `sequence`, `track`,...
font Given a scale and other parameters, generate a soundfont of...
arp Generate an arpeggiated melody.
demo Play a built-in demo.
note Generate an individual note.
seq Play a sequence of `chord`, `midi`, `note`, and/or `arp`...
```
Below are basic details on each command's functionality. For a detailed overview on how to make music with saysynth, refer to this [blog post](https://brian./abelson.live/todo).
## <span class="blue"> **☞ sy note** </span>
<hr/>
`sy note` accepts a note name (eg: `C3`) or midi note number (eg: `69`) and generates input to the `say` command which makes a monophonic note.
##### <span class="orange"> examples </span>
Play the note `D#2` randomizing the phoneme each segment by choosing from the `drone`-like phonemes for `Fred`s voice.
```bash
sy note 'D#2' --randomize-phoneme 'Fred:drone' --randomize-segments 'phoneme'
```
You can see the full list of options for this command via `sy note --help`.
## <span class="blue"> **☞ sy arp** </span>
<hr/>
`sy arp` accepts a chord root (eg: `C3`), chord name, and list of styles to generate a melodic, arpeggiated sequence of speech synthesis.
##### <span class="orange"> example </span>
Play an acid-like sequence:
```bash
sy arp 'E0' `# set the root of the arpeggiator to E-1` \
--chord-notes '0,3,5,7,9,12,14,25,31' `# set the notes of the arpeggiator` \
--text '. TEE BEE THREE OH THREE .' `# text to sing` \
--styles 'down,random_shuffle,random_octaves' `# arpeggiator style names come from the midi-utils module.` \
--beat-bpm '130' `# the bpm to use when applying the note-count ` \
--beat-count '1/32' `# the duration of each beat in the arpeggiator` \
--note-bpm '130' `# the bpm to use when applying the note-count` \
--note-count '1/32' `# the duration of each note` \
--segment-bpm '130' `# the bpm to use when applying the segment-count` \
--segment-count '1/32' `# the duration of each phoneme segment` \
--velocities '60,90,127' `# a list of velocities to apply in order to the outputted notes` \
--duration '15000' `# the total duration of the arpeggiator in milliseconds` \
--render-volume-level-per-note '5' `# see docs` \
--render-volume-level-per-segment '5' `# see docs`
```
You can see the full list of options for this command via `sy arp --help`.
## <span class="blue"> **☞ sy chord** </span>
<hr/>
`sy chord` accepts a chord root (eg: `C3`) or midi note number (eg: `69`), a chord name (eg: min6), and other parameters to spawn multiple `say` commands that generate a polyphonic chord.
##### <span class="orange"> example </span>
Play a slowly-evolving minor 6th chord:
```bash
sy chord 'C2' `# the root of the chord` \
--chord 'min6' `# the name of the chord which comes from midi-utils` \
--duration '45000' `# the duration in ms` \
--segment-bpm '155' `# the bpm to use when using --segment-count` \
--segment-count '1/16' `# the duration of each segment in the note` \
--attack '0.5' --decay '0' --sustain '0.5' --release '0.5' `# ADSR settings` \
--randomize-segments 'phoneme' `# phoneme-level randomization settings` \
--voice 'Alex' `# the voice to use, either Fred, Victoria, or Alex` \
--phoneme 'm,OW,EW' `# list of phonemes to randomly pick from` \
--volume-range 0.03 0.33 `# min and mix of volume range`
```
You can see the full list of options for this command via `sy chord --help`.
## <span class="blue"> **☞ sy font** </span>
<hr/>
`sy font` enables the generation of ["soundfonts"](https://www.maniactools.com/soft/midi_converter/soundfonts.shtml) or directories of individual sound files, which can be used in a sampler or DAW to create custom instruments. All synthesis parameters from `sy note` can be modified in `sy font`.
##### <span class="orange"> example </span>
Create a directory of audio files, one per pitch in a specified scale. These can be used to create instruments in a DAW / livecoding environment of your choice:
```bash
mkdir -p tmp `# create an output directory`
sy font \
--scale-start-at 'C2' `# the lowest note of the scale to generate` \
--scale-end-at 'C5' `# the highest note of the scale to generate` \
--key 'C' `# the key of the --scale` \
--scale 'octatonic_whole' `# the scale to use when selecting the notes to generate. (from midi_utils)` \
--output-dir 'tmp/' `# the directory to write each file to` \
--format 'aiff' `# the format of each file` \
--duration '1000' `# the duration of each file`
```
You can see the full list of options for this command via `sy font --help`.
## <span class="blue"> **☞ sy midi** </span>
<hr/>
`sy midi` accepts a midi file and generates pitched phonemes. The midi files must be fully monophonic. (In other words there must not be any overlapping notes. Eventually I'll figure out this issue, but for now there is a helpful error message which indicates the name of an overlapping note and the time at which it occurs. You can then use this information to edit your midi file in whatever DAW you use. There is also no support for multi-track midi files, though that will be less challenging to implement.) `sy midi` then maps the notes in the midi file onto pitched phonemes
##### <span class="orange"> example </span>
To run this example, clone this repository and execute the following command from the root directory. Alternatively, generate your own midi file and replace it's path with `examples/arp.mid`.
Play a high-pitched sequence from a a midi file.
```bash
sy midi 'examples/arp.mid' --phoneme 'm'
```
You can see the full list of options for this command via `sy midi --help`.
## <span class="blue"> **☞ sy seq** </span>
<hr/>
`sy seq` accepts a `yaml` filepath specifying multiple <code><span class="rainbow-text">saysynth</span></code> commands to be concurrently executed.
The `yaml` file might look something like this:
```yaml
name: my-sequence # The name of the sequence. You pass sequence names into `sy stop` or `sy seq stop` to stop all tracks in a sequence at once.
globals: # Configurations shared between all tracks
duration_bpm: 80 # The bpm to use when calculating each tracks duration
duration_count: 128 # The beat count to use when calculating each tracks duration
tracks: # List of tracks / configurations
chord1: # The name of the track. You can use track names to dynamically start/stop each track via the -t flag.
type: chord # The type of this track. Either chord, arp, note, or midi.
options: # Options to pass to the `chord` function.
# These can also be the shortened versions (eg. 'c' instead of 'chord')
root: E3 # The root note of the chord
chord: min6 # The name of the chord
segment_bpm: 80 # The bpm to use when calculating the length of each segment
phoneme: 'm,2OW'
note1:
type: note
options:
phoneme: 'm,2OW'
start_bpm: 80 # The bpm to use when calculating the start time
start_count: 4 # Delay the start of this track by a count of 4
duration_count: 124 # Make the duration of this track shorter than the global setting by a count of 4
note: F#3 # The note to synthesize.
```
Where `globals` define options shared between all `tracks`, each of which have a `type` which corresponds to a <code><span class="rainbow-text">saysynth</span></code> command (`chord`, `midi`, `note`, and/or `arp`) and a set of `options`.
All commands can also generate a `yaml` version of its parameters by appending the `--yaml` option. For instance `sy note E#3 -rp Fred:note --yaml` would generate something like this:
```yaml
tracks:
- note-b2lw2:
type: note
options:
root: 64
randomize_phoneme: Fred:note
```
##### <span class="purple"> **subcommands** </span>
`sy seq` provides multiple subcommands to control the behavior of your sequence. These include:
- `play`: Play the sequence as-is, from beginning to end, respecting any `start_*` configurations.
- `start`: Launch all tracks in the sequence immediately, irregardless of any `start_*` configurations.
- `stop`: Stop one or more tracks currently playing from the sequence.
- `echo`: Print the sequence to the console.
- `render`: Render all tracks in the sequence as separate, monophonic audio-files.
Each of these subcommands accepts command line flags, as well. For instance, `--tracks` allows you to
`play`, `start`, `stop`, or `render` only certain tracks in the sequence. Similarly `--audio-devices` allows
you to filter tracks which are configured to play on certain audio outputs.
`--config-overrides` provides the ability to override global and track-level configurations at runtime by passing in yaml-formatted configurations, eg: `-c '{"foo":"bar"}'`. These configurations can be specified at the track-level by nesting them under the track name, eg: `-c '{"track":{"foo":"bar"}}'`.
You can also override configurations by providing extra command line arguments available to `midi`, `note`, `chord`, rand/or `arp` tracks, eg: `-sd 10` or `--segment- duration 10`. These can be similarly nested by using a `__` separator, eg: `--track__segment-duration 10`. Parameters specified via the --config-overrides option will take precedence over any extra CLI arguments.
Finally, `--output-dir` allows you to specify the directory to write audio files into as a part of the `render` command.
##### <span class="orange"> example </span>
To run this example, clone this repository and execute the following command from the root directory. Alternatively, generate your own `yaml` file and replace it's path with `examples/hello-world.yml`.
Launch a multi-track sequence from a `yaml` file and stop it after 10 seconds:
```bash
sy seq play examples/hello-world.yml
sleep 10
sy seq stop examples/hello-world.yml -t hello_world
```
You can also see an archive of my past <code><span class="rainbow-text">saysynth</span></code> [performances](https://gitlab.com/gltd/saysynth/-/tree/main/performances) for examples of sequences.
You can see the full list of options for this command via `sy seq --help`.
## <span class="blue"> **☞ sy stop** </span>
<hr/>
`sy stop` allows you to stop currently running <code><span class="rainbow-text">saysynth</span></code> processes by `sequences`, `tracks`, `audio_devices`, and/or `parent_pids`.
Omit all the flags to stop all running processes.
##### <span class="orange"> example </span>
Launch a couple notes, wait 10 seconds, and then stop them:
```bash
sy note D#3 -rp Fred:drone
sy note G#3 -rp Fred:drone
sleep 10
echo "stopping all notes now!"
sy stop -t note
```
## <span class="blue"> **☞ sy demo** </span>
<hr/>
`sy demo` is a wrapper for `sy seq` and allows you to play built-in demo sequences. Live recordings of these demos are also for sale on [bandcamp](https://gltd.bandcamp.com/album/saysynth-demos-v100).
##### <span class="orange"> example </span>
Play the built-in demo <code><span class="rainbow-text">fire</span></code>:
```bash
sy demo play fire
```
You can see the full list of built-in demos. for this command via `sy demo --help`.
## <span class="blue"> **☞ sy version** </span>
<hr/>
<code>sy version</code> prints the current version of <code><span class="rainbow-text">saysynth</span></code>
##### <span class="orange"> example </span>
Print the currently-installed version of saysynth:
```
sy version
```
# <span class="purple">🤝🏽 **Development / Contributing** </span>
<hr/>
If you're interested in contributing to <code><span class="rainbow-text">saysynth</span></code> or would like to report [an issue](https://gitlab.com/gltd/saysynth/-/issues), all development is done on [gitlab](https://gitlab.com/gltd/saysynth). You can also reach out to me via `brian [at] abelson [dot] live`. I'm particularly interested in working with interface designers to turn this into a free VST, or something similar.
To install via `git` for local development:
```bash
git clone https://gitlab.com/gltd/saysynth.git # clone this repo
cd saysynth && python -m venv .venv # create a virtualenv with Python 3.9 or higher
source .venv/bin/activate # activate it
make install # install the library
saysynth --help # check if it worked
make test # run the tests
make docs-html && make docs-view # compile and view the docs (via: pdoc)
```
|
/saysynth-1.0.8.tar.gz/saysynth-1.0.8/README.md
| 0.764892 | 0.892516 |
README.md
|
pypi
|
import itertools
import numpy as np
import pandas as pd
from typing import *
from cached_property import cached_property
from pymoo.core.variable import Variable, Real, Integer, Binary, Choice
__all__ = ['ArchDesignSpace', 'ImplicitArchDesignSpace']
class ArchDesignSpace:
"""
Base class for a hierarchical architecture design space definition. The basic information optimization algorithms
need from a design space is as follows:
- Design variable definition: types, bounds, options
- Some way to exhaustively sample all discrete design vectors (aka full factorial; grid)
- Activeness information: for a given matrix of design vectors, a boolean matrix specifying which vars are active
- Imputation: correction of design vectors to canonical design vectors, setting inactive variables to some default
value and correcting invalid variable values
- Optionally calculate the design of the design space: number of valid discrete design vectors
Design variable terminology:
- Continuous: any value between some lower and upper bound (inclusive)
--> for example [0, 1]: 0, 0.25, .667, 1
- Discrete: integer or categorical
- Integer: integer between 0 and some upper bound (inclusive); ordering and distance matters
--> for example [0, 2]: 0, 1, 2
- Categorical: one of n options, encoded as integers; ordering and distance are not defined
--> for example [red, blue, yellow]: red, blue
"""
def __init__(self):
self._choice_value_map = None
self._is_initialized = False
@cached_property
def n_var(self):
return len(self.des_vars)
@cached_property
def des_vars(self) -> List[Variable]:
"""
Returns the defined design variables.
Categorical variables (Choice) are encoded as integer values from 0 to n_opts-1. Use get_categorical_values to
get the associated categorical values.
"""
corr_des_vars = []
self._choice_value_map = choice_value_map = {}
for i_var, var_type in enumerate(self._get_variables()):
if isinstance(var_type, Choice):
choice_value_map[i_var] = var_type.options
var_type = Choice(options=list(range(len(var_type.options))))
elif not isinstance(var_type, (Real, Integer, Binary)):
raise RuntimeError(f'Unsupported variable type: {var_type!r}')
corr_des_vars.append(var_type)
self._is_initialized = True
return corr_des_vars
@cached_property
def is_conditionally_active(self) -> np.ndarray:
"""
Returns a mask specifying for each design variable whether it is conditionally active (i.e. may become inactive
at some point).
"""
is_cond_active = self._is_conditionally_active()
# If not provided, deduce from all discrete design vectors
if is_cond_active is None:
_, is_act_all = self.all_discrete_x
if is_act_all is not None:
return np.any(~is_act_all, axis=0)
raise RuntimeError('Could not deduce is_conditionally_active from all x, '
'implement _is_conditionally_active!')
is_cond_active = np.array(is_cond_active)
if len(is_cond_active) != self.n_var:
raise ValueError(f'is_cont_active should be same length as x: {len(is_cond_active)} != {self.n_var}')
if np.all(is_cond_active):
raise ValueError(f'At least one variable should be nonconditionally active: {is_cond_active}')
return is_cond_active
def get_categorical_values(self, x: np.ndarray, i_dv) -> np.ndarray:
"""Gets the associated categorical variable values for some design variable"""
if not self._is_initialized:
getattr(self, 'des_vars')
if i_dv not in self._choice_value_map:
raise ValueError(f'Design variable is not categorical: {i_dv}')
x_values = x[:, i_dv]
values = x_values.astype(str)
for i_cat, value in enumerate(self._choice_value_map[i_dv]):
values[x_values == i_cat] = value
return values
@cached_property
def xl(self) -> np.ndarray:
"""Vector containing lower bounds of variables"""
xl = np.zeros((self.n_var,))
for i_var, des_var in enumerate(self.des_vars):
if isinstance(des_var, (Real, Integer)):
xl[i_var] = des_var.bounds[0]
return xl
@cached_property
def x_mid(self) -> np.ndarray:
"""Mid-bounds values"""
return .5*(self.xl+self.xu)
@cached_property
def xu(self) -> np.ndarray:
"""Vector containing upper bounds of variables"""
xu = np.empty((self.n_var,))
for i_var, des_var in enumerate(self.des_vars):
if isinstance(des_var, (Real, Integer)):
xu[i_var] = des_var.bounds[1]
elif isinstance(des_var, Binary):
xu[i_var] = 1
elif isinstance(des_var, Choice):
xu[i_var] = len(des_var.options)-1
return xu
@cached_property
def is_int_mask(self) -> np.ndarray:
"""Boolean vector specifying whether each variable is an integer (ordinal) variable"""
return np.array([isinstance(des_var, (Integer, Binary)) for des_var in self.des_vars], dtype=bool)
@cached_property
def is_cat_mask(self) -> np.ndarray:
"""Boolean vector specifying whether each variable is a categorical variable"""
return np.array([isinstance(des_var, Choice) for des_var in self.des_vars], dtype=bool)
@cached_property
def is_discrete_mask(self) -> np.ndarray:
"""Boolean vector specifying whether each variable is a discrete (integer or categorical) variable"""
return self.is_int_mask | self.is_cat_mask
@cached_property
def is_cont_mask(self) -> np.ndarray:
"""Boolean vector specifying whether each variable is a continuous variable"""
return ~self.is_discrete_mask
def correct_x(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Imputes design vectors and returns activeness vectors"""
x_imputed = x.copy()
self.round_x_discrete(x_imputed)
is_active = np.ones(x.shape, dtype=bool)
self.correct_x_impute(x_imputed, is_active)
return x_imputed, is_active
def correct_x_impute(self, x: np.ndarray, is_active: np.ndarray):
"""Corrects and imputes design vectors, assuming that they have already been corrected for discreteness"""
self._correct_x(x, is_active)
self.impute_x(x, is_active)
def round_x_discrete(self, x: np.ndarray):
"""
Ensures that discrete design variables take up integer values.
Rounding is not applied directly, as this would reduce the amount of points assigned to the first and last
options.
Directly rounding:
np.unique(np.round(np.linspace(0, 2, 100)).astype(int), return_counts=True) --> 25, 50, 25 (center bias)
Stretched rounding:
x = np.linspace(0, 2, 100)
xs = x*((np.max(x)+.99)/np.max(x))-.5
np.unique(np.abs(np.round(xs)).astype(int), return_counts=True) --> 34, 33, 33 (evenly distributed)
"""
is_discrete_mask = self.is_discrete_mask
x_discrete = x[:, is_discrete_mask].astype(float)
xl, xu = self.xl[is_discrete_mask], self.xu[is_discrete_mask]
diff = xu-xl
for ix in range(x_discrete.shape[1]):
x_discrete[x_discrete[:, ix] < xl[ix], ix] = xl[ix]
x_discrete[x_discrete[:, ix] > xu[ix], ix] = xu[ix]
x_stretched = (x_discrete-xl)*((diff+.9999)/diff)-.5
x_rounded = (np.round(x_stretched)+xl).astype(int)
x[:, is_discrete_mask] = x_rounded
def impute_x(self, x: np.ndarray, is_active: np.ndarray):
"""
Applies the default imputation to design vectors:
- Sets inactive discrete design variables to 0
- Sets inactive continuous design variables to the mid of their bounds
"""
# Impute inactive discrete design variables: set to their lower bound
for i_dv in np.where(self.is_discrete_mask)[0]:
x[~is_active[:, i_dv], i_dv] = self.xl[i_dv]
# Impute inactive continuous design variables: set to their mid-bound
x_mid = self.x_mid
for i_dv in np.where(self.is_cont_mask)[0]:
x[~is_active[:, i_dv], i_dv] = x_mid[i_dv]
def get_n_valid_discrete(self) -> Optional[int]:
"""Return the number of valid discrete design points (ignoring continuous dimensions); enables calculation of
the imputation ratio"""
n_valid = self._get_n_valid_discrete()
if n_valid is not None:
return n_valid
x_discrete, _ = self.all_discrete_x
if x_discrete is not None:
return x_discrete.shape[0]
def get_n_declared_discrete(self) -> int:
"""Returns the number of declared discrete design points (ignoring continuous dimensions), calculated from the
cartesian product of discrete design variables"""
# Get number of discrete options for each discrete design variable
is_discrete_mask = self.is_discrete_mask
n_opts_discrete = self.xu[is_discrete_mask]-self.xl[is_discrete_mask]+1
if len(n_opts_discrete) == 0:
return 1
return int(np.prod(n_opts_discrete, dtype=float))
@cached_property
def imputation_ratio(self) -> float:
"""
Returns the problem-level imputation ratio, a measure of how hierarchical the problem is. It is calculated
from the product of the discrete and continuous imputation ratios.
"""
return self.discrete_imputation_ratio * self.continuous_imputation_ratio
@cached_property
def discrete_imputation_ratio(self) -> float:
"""
Returns the imputation ratio considering only the discrete design vectors: it represents the ratio between
number of declared discrete dimensions (Cartesian product) and the number of valid discrete design vectors.
A value of 1 indicates no hierarchy, any value higher than 1 means there is hierarchy and the higher the value,
the more difficult it is to randomly sample a valid design vector.
"""
# Get valid design points
n_valid = self.get_n_valid_discrete()
if n_valid is None:
return np.nan
if n_valid == 0:
return 1.
n_declared = self.get_n_declared_discrete()
discrete_imp_ratio = n_declared/n_valid
return discrete_imp_ratio
@cached_property
def continuous_imputation_ratio(self) -> float:
"""
Returns the imputation ratio considering only the continuous design variables: it represents the nr of
continuous dimensions over the mean number of active continuous dimensions, as seen over all possible discrete
design vectors. The higher the number, the less continuous dimensions are active on average. A value of 1
indicates all continuous dimensions are always active.
"""
# Check if we have any continuous dimensions
i_is_cont = np.where(self.is_cont_mask)[0]
if len(i_is_cont) == 0:
return 1.
# Check if mean active continuous dimensions is explicitly defined
n_cont_active_mean = self._get_n_active_cont_mean()
# Get from discrete design vectors
if n_cont_active_mean is None:
x_all, is_active_all = self.all_discrete_x
if x_all is None:
return np.nan
n_cont_active_mean = np.sum(is_active_all[:, i_is_cont]) / x_all.shape[0]
# Calculate imputation ratio from declared / mean_active
n_cont_dim_declared = len(i_is_cont)
return n_cont_dim_declared / n_cont_active_mean
def get_discrete_rates(self, force=False, show=False) -> Optional[pd.DataFrame]:
"""Returns for each discrete value of the discrete design variables, how often the relatively occur over all
possible design vectors. A value of -1 represents an inactive design variable. Results are returned in a
pandas DataFrame with each column representing a design variable.
Also adds a measure of rate diversity: difference between lowest and highest occurring values."""
# Get all discrete design vectors
x_all, is_act_all = self.all_discrete_x
if x_all is None:
if not force:
return
x_all, is_act_all = self.all_discrete_x_by_trial_and_imputation
# Set inactive values to -1
x_merged = (x_all-self.xl).astype(int)
x_merged[~is_act_all] = -1
n = x_merged.shape[0]
# Count the values
is_discrete_mask = self.is_discrete_mask
counts = {}
i_opts = set()
for ix in range(len(is_discrete_mask)):
if not is_discrete_mask[ix]:
counts[f'x{ix}'] = {}
continue
values, counts_i = np.unique(x_merged[:, ix], return_counts=True)
i_opts |= set(values)
counts[f'x{ix}'] = {value: counts_i[iv]/n for iv, value in enumerate(values)}
df = pd.DataFrame(index=sorted(list(i_opts)), columns=list(counts.keys()), data=counts)
df = df.rename(index={val: 'inactive' if val == -1 else f'opt {val}' for val in df.index})
# Add a measure of diversity: the range between the lowest and highest occurring values
diversity = df.max(axis=0)-df.min(axis=0)
if -1 in i_opts:
df_active = df.iloc[1:, :]
col_sums = df_active.sum(axis=0)
df_active /= col_sums
active_diversity = df_active.max(axis=0)-df_active.min(axis=0)
else:
active_diversity = diversity
df = pd.concat([df, pd.Series(diversity, name='diversity').to_frame().T,
pd.Series(active_diversity, name='active-diversity').to_frame().T], axis=0)
max_diversity = np.zeros((len(df),))*np.nan
max_diversity[-2] = df.iloc[-2, :].max()
max_diversity[-1] = df.iloc[-1, :].max()
df = pd.concat([df, pd.Series(index=df.index, data=max_diversity, name='max')], axis=1)
if show:
is_discrete_mask = np.concatenate([self.is_discrete_mask, [True]])
with pd.option_context('display.max_rows', None, 'display.max_columns', None,
'display.expand_frame_repr', False, 'max_colwidth', None):
print(df.iloc[:, is_discrete_mask].replace(np.nan, ''))
return df
def quick_sample_discrete_x(self, n: int) -> Tuple[np.ndarray, np.ndarray]:
"""Sample n design vectors (also return is_active) without generating all design vectors first"""
x, is_active = self._quick_sample_discrete_x(n)
if x.shape[1] != self.n_var or is_active.shape[1] != self.n_var:
raise RuntimeError(f'Inconsistent design vector dimensions: {x.shape[1]} != {self.n_var}')
if x.shape[0] > n:
x = x[:n, :]
is_active = is_active[:n, :]
x = x.astype(float) # Otherwise continuous variables cannot be imputed
self.round_x_discrete(x)
self.impute_x(x, is_active)
return x, is_active
@cached_property
def all_discrete_x(self) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
"""Generate all possible discrete design vectors, if the problem provides this function. Returns both the design
vectors and activeness information. Active continuous variables may have any value within their bounds."""
# Check if this problem implements discrete design vector generation
discrete_x = self._gen_all_discrete_x()
if discrete_x is None:
return None, None
# Impute values (mostly for continuous dimensions)
x, is_active = discrete_x
if x is None or is_active is None:
return None, None
if x.shape[1] != self.n_var or is_active.shape[1] != self.n_var:
raise RuntimeError(f'Inconsistent design vector dimensions: {x.shape[1]} != {self.n_var}')
x = x.astype(float) # Otherwise continuous variables cannot be imputed
self.impute_x(x, is_active)
# Cross-check with numerical estimate
n_valid = self._get_n_valid_discrete()
if n_valid is not None and (n_valid != x.shape[0] or n_valid != is_active.shape[0]):
raise RuntimeError(f'Inconsistent estimation of nr of discrete design vectors: {n_valid} != {x.shape[0]}')
return x, is_active
@cached_property
def all_discrete_x_by_trial_and_imputation(self):
"""
Find all possible discrete design vectors by trail and imputation:
- Generate the Cartesian product of all discrete variables
- Impute design vectors
- Remove duplicates
"""
# First sample only discrete dimensions
opt_values = self.get_exhaustive_sample_values(n_cont=1)
x_cart_product_gen = itertools.product(*opt_values)
is_cont_mask = self.is_cont_mask
is_discrete_mask = ~is_cont_mask
# Create and repair the sampled design vectors in batches
n_batch = 1000
x_discr = np.zeros((0, len(opt_values)))
is_act_discr = np.zeros(x_discr.shape, dtype=bool)
while True:
# Get next batch
x_repair = []
for _ in range(n_batch):
x_next = next(x_cart_product_gen, None)
if x_next is None:
break
x_repair.append(x_next)
if len(x_repair) == 0:
break
x_repair = np.array(x_repair)
# Repair current batch
# print(f'Sampling {x_repair.shape[0]} ({x_repaired.shape[0]} sampled)')
x_repair_input = x_repair
x_repair, is_active = self.correct_x(x_repair)
# Remove repaired points
is_not_repaired = ~np.any(x_repair[:, is_discrete_mask] != x_repair_input[:, is_discrete_mask], axis=1)
x_repair = x_repair[is_not_repaired, :]
is_active = is_active[is_not_repaired, :]
x_discr = np.row_stack([x_discr, x_repair])
is_act_discr = np.row_stack([is_act_discr, is_active.astype(bool)])
# Impute continuous values
self.impute_x(x_discr, is_act_discr)
return x_discr, is_act_discr
def get_exhaustive_sample_values(self, n_cont=5):
# Determine bounds and which design variables are discrete
xl, xu = self.xl, self.xu
is_cont = self.is_cont_mask
# Get values to be sampled for each design variable
return [np.linspace(xl[i], xu[i], n_cont) if is_cont[i] else np.arange(xl[i], xu[i]+1) for i in range(len(xl))]
def _quick_random_sample_discrete_x(self, n: int) -> Tuple[np.ndarray, np.ndarray]:
opt_values = self.get_exhaustive_sample_values(n_cont=1)
x = np.empty((n, self.n_var))
is_discrete_mask = self.is_discrete_mask
for i_dv in range(self.n_var):
if is_discrete_mask[i_dv]:
i_opt_sampled = np.random.choice(len(opt_values[i_dv]), n, replace=True)
x[:, i_dv] = opt_values[i_dv][i_opt_sampled]
is_active = np.ones(x.shape, dtype=bool)
self._correct_x(x, is_active)
return x, is_active
def is_explicit(self) -> bool:
"""Whether this design space is defined explicitly, that is: a model of the design space is available and
correct, and therefore the problem evaluation function never needs to correct any design vector"""
raise NotImplementedError
def _get_variables(self) -> List[Variable]:
"""Returns the list of design variables (pymoo classes)"""
raise NotImplementedError
def _is_conditionally_active(self) -> Optional[List[bool]]:
"""Returns for each design variable whether it is conditionally active (i.e. may become inactive)"""
raise NotImplementedError
def _correct_x(self, x: np.ndarray, is_active: np.ndarray):
"""
Fill the activeness matrix (n x nx) and if needed correct design vectors (n x nx) that are partially inactive.
Imputation of inactive variables is handled automatically.
"""
raise NotImplementedError
def _quick_sample_discrete_x(self, n: int) -> Tuple[np.ndarray, np.ndarray]:
"""Sample n discrete design vectors (also return is_active) without generating all design vectors first"""
raise NotImplementedError
def _get_n_valid_discrete(self) -> Optional[int]:
"""Return the number of valid discrete design points (ignoring continuous dimensions); enables calculation of
the imputation ratio"""
raise NotImplementedError
def _get_n_active_cont_mean(self) -> Optional[int]:
"""
Get the mean number of active continuous dimensions, as seen over all discrete design vectors.
For example, if there are two discrete design vectors like this"
x_discrete x_continuous1 x_continuous2
0 Active Active
1 Active Inactive
Then the mean number of active continuous dimensions is:
3 (total nr of active continuous dimensions) / 2 (number of discrete vectors) = 1.5
"""
raise NotImplementedError
def _gen_all_discrete_x(self) -> Optional[Tuple[np.ndarray, np.ndarray]]:
"""Generate all possible discrete design vectors (if available). Returns design vectors and activeness
information."""
raise NotImplementedError
class ImplicitArchDesignSpace(ArchDesignSpace):
"""An implicit, problem-specific definition of the architecture design space"""
def __init__(self, des_vars: List[Variable], correct_x_func: Callable[[np.ndarray, np.ndarray], None],
is_conditional_func: Callable[[], List[bool]],
n_valid_discrete_func: Callable[[], int] = None, n_active_cont_mean: Callable[[], int] = None,
gen_all_discrete_x_func: Callable[[], Optional[Tuple[np.ndarray, np.ndarray]]] = None):
self._variables = des_vars
self._correct_x_func = correct_x_func
self._is_conditional_func = is_conditional_func
self._n_valid_discrete_func = n_valid_discrete_func
self._n_active_cont_mean = n_active_cont_mean
self._gen_all_discrete_x_func = gen_all_discrete_x_func
super().__init__()
def is_explicit(self) -> bool:
return False
def _get_variables(self) -> List[Variable]:
return self._variables
def _is_conditionally_active(self) -> Optional[List[bool]]:
return self._is_conditional_func()
def _correct_x(self, x: np.ndarray, is_active: np.ndarray):
self._correct_x_func(x, is_active)
def _quick_sample_discrete_x(self, n: int) -> Tuple[np.ndarray, np.ndarray]:
return self._quick_random_sample_discrete_x(n)
def _get_n_valid_discrete(self) -> Optional[int]:
if self._n_valid_discrete_func is not None:
return self._n_valid_discrete_func()
def _get_n_active_cont_mean(self) -> Optional[int]:
if self._n_active_cont_mean is not None:
return self._n_active_cont_mean()
def _gen_all_discrete_x(self) -> Optional[Tuple[np.ndarray, np.ndarray]]:
if self._gen_all_discrete_x_func is not None:
return self._gen_all_discrete_x_func()
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/design_space.py
| 0.895893 | 0.619356 |
design_space.py
|
pypi
|
import os
import re
import pickle
import hashlib
import numpy as np
from typing import *
import concurrent.futures
import matplotlib.pyplot as plt
from pymoo.optimize import minimize
from pymoo.core.variable import Real
from pymoo.core.problem import Problem
from pymoo.core.evaluator import Evaluator
from pymoo.visualization.scatter import Scatter
from pymoo.core.initialization import Initialization
from pymoo.algorithms.moo.nsga2 import calc_crowding_distance
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from pymoo.termination.default import DefaultMultiObjectiveTermination, DefaultSingleObjectiveTermination
from sb_arch_opt.sampling import HierarchicalExhaustiveSampling, HierarchicalSampling
__all__ = ['CachedParetoFrontMixin']
class CachedParetoFrontMixin(Problem):
"""Mixin to calculate the Pareto front once by simply running the problem several times using NSGA2, meant for test
problems. Stores the results based on the repr of the main class, so make sure that one is set."""
default_enable_pf_calc = True
def reset_pf_cache(self):
cache_path = self._pf_cache_path()
if os.path.exists(cache_path):
os.remove(cache_path)
# pymoo's implementation of function result caching
if 'cache' in self.__dict__:
for key in ['pareto_front', 'pareto_set']:
if key in self.__dict__['cache']:
del self.__dict__['cache'][key]
def calc_pareto_front(self, **kwargs):
return self._calc_pareto_front(force=True, **kwargs)
def _calc_pareto_front(self, *_, **kwargs):
_, pf = self._calc_pareto_set_front(**kwargs)
return pf
def calc_pareto_set(self, *_, **kwargs):
return self._calc_pareto_set(force=True, **kwargs)
def _calc_pareto_set(self, *_, **kwargs):
ps, _ = self._calc_pareto_set_front(**kwargs)
return ps
def _calc_pareto_set_front(self, *_, pop_size=200, n_gen_min=10, n_repeat=12, n_pts_keep=100, force=False, **__):
if not force and not self.default_enable_pf_calc:
raise RuntimeError('On-demand PF calc is disabled, use calc_pareto_front instead')
# Check if Pareto front has already been cached
cache_path = self._pf_cache_path()
if os.path.exists(cache_path):
with open(cache_path, 'rb') as fp:
return pickle.load(fp)
# Get an approximation of the combinatorial design space size, only relevant if there are no continuous vars
n = 1
xl, xu = self.bounds()
for i, var in enumerate(self.vars.values()):
if isinstance(var, Real):
n = None
break
n *= int(xu[i]-xl[i]+1)
# If the design space is smaller than the number of requested evaluations, simply evaluate all points
if n is not None and n < pop_size*n_gen_min*n_repeat:
pop = HierarchicalExhaustiveSampling().do(self, n)
Evaluator().eval(self, pop)
ps = pop.get('X')
pf = pop.get('F')
i_non_dom = NonDominatedSorting().do(pf, only_non_dominated_front=True)
ps = ps[i_non_dom, :]
pf = pf[i_non_dom, :]
# Otherwise, execute NSGA2 in parallel and merge resulting Pareto fronts
else:
with concurrent.futures.ProcessPoolExecutor() as executor:
futures = [executor.submit(self._run_minimize, pop_size, n_gen_min, i, n_repeat)
for i in range(n_repeat)]
concurrent.futures.wait(futures)
ps = pf = None
for i in range(n_repeat):
res = futures[i].result()
if res.F is None:
continue
if pf is None:
ps = res.X
pf = res.F
else:
pf_merged = np.row_stack([pf, res.F])
i_non_dom = NonDominatedSorting().do(pf_merged, only_non_dominated_front=True)
ps = np.row_stack([ps, res.X])[i_non_dom, :]
pf = pf_merged[i_non_dom, :]
# Reduce size of Pareto front to a predetermined amount to ease Pareto-front-related calculations
if pf is None or pf.shape[0] == 0:
raise RuntimeError('Could not find Pareto front')
pf, i_unique = np.unique(pf, axis=0, return_index=True)
ps = ps[i_unique, :]
if n_pts_keep is not None and pf.shape[0] > n_pts_keep:
for _ in range(pf.shape[0]-n_pts_keep):
crowding_of_front = calc_crowding_distance(pf)
i_max_crowding = np.argsort(crowding_of_front)[1:]
ps = ps[i_max_crowding, :]
pf = pf[i_max_crowding, :]
# Store in cache
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
with open(cache_path, 'wb') as fp:
pickle.dump((ps, pf), fp)
return ps, pf
def _run_minimize(self, pop_size, n_gen, i, n):
from sb_arch_opt.algo.pymoo_interface import get_nsga2
print(f'Running Pareto front discovery {i+1}/{n} ({pop_size} pop, {n_gen} gen): {self.name()}')
robust_period = n_gen
n_max_gen = n_gen*10
n_max_eval = n_max_gen*pop_size
if self.n_obj > 1:
termination = DefaultMultiObjectiveTermination(
xtol=5e-4, cvtol=1e-8, ftol=5e-3, n_skip=5, period=robust_period, n_max_gen=n_max_gen,
n_max_evals=n_max_eval)
else:
termination = DefaultSingleObjectiveTermination(
xtol=1e-8, cvtol=1e-8, ftol=1e-6, period=robust_period, n_max_gen=n_max_gen, n_max_evals=n_max_eval)
result = minimize(self, get_nsga2(pop_size=pop_size), termination=termination, copy_termination=False)
result.history = None
result.algorithm = None
return result
def plot_pf(self: Union[Problem, 'CachedParetoFrontMixin'], show_approx_f_range=True, n_sample=100,
filename=None, show=True, **kwargs):
"""Plot the Pareto front, optionally including randomly sampled points from the design space"""
pf = self.pareto_front(**kwargs)
scatter = Scatter(close_on_destroy=False)
if show_approx_f_range:
scatter.add(self.get_approx_f_range(), s=.1, color='white')
pop = Initialization(HierarchicalSampling()).do(self, n_sample)
Evaluator().eval(self, pop)
scatter.add(pop.get('F'), s=5)
scatter.add(pf)
if filename is not None:
scatter.save(filename)
if show:
scatter.show()
plt.close(scatter.fig)
def get_approx_f_range(self, n_sample=100):
pop = Initialization(HierarchicalSampling()).do(self, n_sample)
Evaluator().eval(self, pop)
f = pop.get('F')
f_max = np.max(f, axis=0)
f_min = np.min(f, axis=0)
return np.array([f_min, f_max])
def _pf_cache_path(self):
class_str = repr(self)
if class_str.startswith('<'):
class_str = self.__class__.__name__
class_str = re.sub('[^0-9a-z]', '_', class_str.lower().strip())
if len(class_str) > 20:
class_str = hashlib.md5(class_str.encode('utf-8')).hexdigest()[:20]
return os.path.expanduser(os.path.join('~', '.arch_opt_pf_cache', '2_'+class_str+'.pkl'))
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/pareto_front.py
| 0.784154 | 0.310681 |
pareto_front.py
|
pypi
|
import logging
import warnings
import numpy as np
from typing import Optional, Tuple, List
from scipy.stats.qmc import Sobol
from scipy.spatial import distance
from pymoo.core.repair import Repair
from pymoo.core.variable import Real
from pymoo.core.problem import Problem
from pymoo.core.sampling import Sampling
from pymoo.core.initialization import Initialization
from pymoo.operators.sampling.rnd import FloatRandomSampling
from pymoo.core.duplicate import DefaultDuplicateElimination
from pymoo.operators.sampling.lhs import sampling_lhs_unit
from sb_arch_opt.problem import ArchOptProblemBase, ArchOptRepair
from sb_arch_opt.util import get_np_random_singleton
__all__ = ['HierarchicalExhaustiveSampling', 'HierarchicalSampling',
'get_init_sampler', 'LargeDuplicateElimination', 'TrailRepairWarning']
log = logging.getLogger('sb_arch_opt.sampling')
def get_init_sampler(repair: Repair = None, remove_duplicates=True):
"""Helper function to get an Initialization class with hierarchical sampling"""
if repair is None:
repair = ArchOptRepair()
sampling = HierarchicalSampling(repair=repair, sobol=True)
# Samples are already repaired because we're using the hierarchical samplers
eliminate_duplicates = LargeDuplicateElimination() if remove_duplicates else None
return Initialization(sampling, eliminate_duplicates=eliminate_duplicates)
class TrailRepairWarning(RuntimeWarning):
pass
class HierarchicalExhaustiveSampling(Sampling):
"""Exhaustively samples the design space, taking n_cont samples for each continuous variable.
Can take a long time if the design space is large and the problem doesn't provide a way to generate all discrete
design vectors, and doesn't work well for purely continuous problems."""
def __init__(self, repair: Repair = None, n_cont=5):
super().__init__()
if repair is None:
repair = ArchOptRepair()
self._repair = repair
self._n_cont = n_cont
def _do(self, problem: Problem, n_samples, **kwargs):
return self.do_sample(problem)
def do_sample(self, problem: Problem):
# First sample only discrete dimensions
x_discr, is_act_discr = self.get_all_x_discrete(problem)
# Expand along continuous dimensions
n_cont = self._n_cont
is_cont_mask = self.get_is_cont_mask(problem)
if n_cont > 1 and np.any(is_cont_mask):
x = x_discr
is_act = is_act_discr
for i_dv in np.where(is_cont_mask)[0]:
# Expand when continuous variable is active
is_act_i = is_act[:, i_dv]
n_repeat = np.ones(len(is_act_i), dtype=int)
n_repeat[is_act_i] = n_cont
x = np.repeat(x, n_repeat, axis=0)
is_act = np.repeat(is_act, n_repeat, axis=0)
# Fill sampled values
dv_sampled = np.linspace(problem.xl[i_dv], problem.xu[i_dv], n_cont)
n_dv_rep = np.sum(is_act_i)
dv_sampled_rep = np.tile(dv_sampled, n_dv_rep)
rep_idx = np.cumsum([0]+list(n_repeat))[:-1]
i_repeated_at = np.repeat(rep_idx[is_act_i], n_repeat[is_act_i]) + np.tile(np.arange(n_cont), n_dv_rep)
x[i_repeated_at, i_dv] = dv_sampled_rep
else:
x = x_discr
return x
@staticmethod
def has_cheap_all_x_discrete(problem: Problem):
if isinstance(problem, ArchOptProblemBase):
# Check if the problem itself provides all discrete design vectors
x_discrete, _ = problem.all_discrete_x
if x_discrete is not None:
return True
return False
def get_all_x_discrete(self, problem: Problem):
# Check if the problem itself can provide all discrete design vectors
if isinstance(problem, ArchOptProblemBase):
x_discr, is_act_discr = problem.all_discrete_x
if x_discr is not None:
return x_discr, is_act_discr
# Otherwise, use trail and repair (costly!)
warnings.warn(f'Generating hierarchical discrete samples by trial and repair for {problem!r}! '
f'Consider implementing `_gen_all_discrete_x`', TrailRepairWarning)
return self.get_all_x_discrete_by_trial_and_repair(problem)
@staticmethod
def get_all_x_discrete_by_trial_and_repair(problem: Problem):
if not isinstance(problem, ArchOptProblemBase):
raise RuntimeError('Not implemented for generic Problems!')
return problem.design_space.all_discrete_x_by_trial_and_imputation
@classmethod
def get_exhaustive_sample_values(cls, problem: Problem, n_cont=5):
if isinstance(problem, ArchOptProblemBase):
return problem.design_space.get_exhaustive_sample_values(n_cont=n_cont)
# Determine bounds and which design variables are discrete
xl, xu = problem.bounds()
is_cont = cls.get_is_cont_mask(problem)
# Get values to be sampled for each design variable
return [np.linspace(xl[i], xu[i], n_cont) if is_cont[i] else np.arange(xl[i], xu[i]+1) for i in range(len(xl))]
@staticmethod
def get_is_cont_mask(problem: Problem):
if isinstance(problem, ArchOptProblemBase):
return problem.is_cont_mask
is_cont = np.ones((problem.n_var,), dtype=bool)
if problem.vars is not None:
for i, var in enumerate(problem.vars.values()):
if not isinstance(var, Real):
is_cont[i] = False
return is_cont
@classmethod
def get_n_sample_exhaustive(cls, problem: Problem, n_cont=5):
values = cls.get_exhaustive_sample_values(problem, n_cont=n_cont)
return int(np.prod([len(opts) for opts in values], dtype=float))
def __repr__(self):
return f'{self.__class__.__name__}()'
class HierarchicalSampling(FloatRandomSampling):
"""
Hierarchical mixed-discrete sampling. There are two ways the random sampling is performed:
A: Generate and select:
1. Generate all possible discrete design vectors
2. Separate discrete design vectors by active discrete variables
3. Within each group, uniformly sample discrete design vectors
4. Concatenate and randomize active continuous variables
B: One-shot:
1. Randomly select design variable values
2. Repair/impute design vectors
The first way yields better results, as there is an even chance of selecting every valid discrete design vector,
however it takes more memory and might be too much for very large design spaces.
"""
_n_comb_gen_all_max = 100e3
def __init__(self, repair: Repair = None, sobol=True, seed=None):
if repair is None:
repair = ArchOptRepair()
self._repair = repair
self.sobol = sobol
self.n_iter = 10
super().__init__()
# Simply set the seed on the global numpy instance
if seed is not None:
np.random.seed(seed)
def _do(self, problem, n_samples, **kwargs):
x_sampled, _ = self.sample_get_x(problem, n_samples)
return x_sampled
def sample_get_x(self, problem: ArchOptProblemBase, n_samples: int) -> Tuple[np.ndarray, np.ndarray]:
"""
Sample design points using the hierarchical sampling algorithm and return is_active information.
"""
# Get Cartesian product of all discrete design variables (only available if design space is not too large)
x, is_active = self.get_hierarchical_cartesian_product(problem, self._repair)
x_sampled, is_active = self.randomly_sample(problem, n_samples, self._repair, x, is_active)
return x_sampled, is_active
@classmethod
def get_hierarchical_cartesian_product(cls, problem: Problem, repair: Repair) \
-> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
# Get values to be sampled for each discrete design variable
exhaustive_sampling = HierarchicalExhaustiveSampling(repair=repair, n_cont=1)
opt_values = exhaustive_sampling.get_exhaustive_sample_values(problem, n_cont=1)
# Get the number of samples in the cartesian product
n_opt_values = int(np.prod([len(values) for values in opt_values], dtype=float))
# If less than some threshold, sample all and then select (this gives a better distribution)
if n_opt_values < cls._n_comb_gen_all_max or exhaustive_sampling.has_cheap_all_x_discrete(problem):
try:
x, is_active = exhaustive_sampling.get_all_x_discrete(problem)
return x, is_active
except MemoryError:
pass
warnings.warn(f'Hierarchical sampling is not possible for {problem!r}, falling back to non-hierarchical '
f'sampling! Consider implementing `_gen_all_discrete_x`', TrailRepairWarning)
return None, None
def randomly_sample(self, problem, n_samples, repair: Repair, x_all: Optional[np.ndarray],
is_act_all: Optional[np.ndarray], lhs=False) -> Tuple[np.ndarray, np.ndarray]:
is_cont_mask = HierarchicalExhaustiveSampling.get_is_cont_mask(problem)
has_x_cont = np.any(is_cont_mask)
xl, xu = problem.xl, problem.xu
sobol = self.sobol
def _choice(n_choose, n_from, replace=True):
return self._choice(n_choose, n_from, replace=replace, sobol=sobol)
# If the population of all available discrete design vectors is available, sample from there
is_active = is_act_all
might_contain_duplicates = False
if x_all is not None:
x, is_active = self._sample_discrete_x(n_samples, is_cont_mask, x_all, is_act_all, sobol=sobol)
# Otherwise, sample discrete vectors randomly
elif isinstance(problem, ArchOptProblemBase):
might_contain_duplicates = True
x, is_active = problem.design_space.quick_sample_discrete_x(n_samples)
else:
might_contain_duplicates = True
opt_values = HierarchicalExhaustiveSampling.get_exhaustive_sample_values(problem, n_cont=1)
x = np.empty((n_samples, problem.n_var))
for i_dv in range(problem.n_var):
if not is_cont_mask[i_dv]:
i_opt_sampled = _choice(n_samples, len(opt_values[i_dv]))
x[:, i_dv] = opt_values[i_dv][i_opt_sampled]
# Randomize continuous variables
if has_x_cont:
if is_active is None:
is_active = np.ones(x.shape, dtype=bool)
nx_cont = len(np.where(is_cont_mask)[0])
if lhs:
x_unit = sampling_lhs_unit(x.shape[0], nx_cont)
elif sobol:
x_unit = self._sobol(x.shape[0], nx_cont)
else:
x_unit = np.random.random((x.shape[0], nx_cont))
x_unit_abs = x_unit*(xu[is_cont_mask]-xl[is_cont_mask])+xl[is_cont_mask]
# Do not overwrite inactive imputed continuous variables
is_inactive_override = ~is_active[:, is_cont_mask]
x_unit_abs[is_inactive_override] = x[:, is_cont_mask][is_inactive_override]
x[:, is_cont_mask] = x_unit_abs
if isinstance(problem, ArchOptProblemBase):
x, is_active = problem.correct_x(x)
else:
x = repair.do(problem, x)
if isinstance(repair, ArchOptRepair) and repair.latest_is_active is not None:
is_active = repair.latest_is_active
if is_active is None:
raise ValueError('Unexpectedly empty is_active!')
if not has_x_cont and might_contain_duplicates:
is_unique = ~LargeDuplicateElimination.eliminate(x)
x = x[is_unique, :]
is_active = is_active[is_unique, :]
return x, is_active
def _sample_discrete_x(self, n_samples: int, is_cont_mask, x_all: np.ndarray, is_act_all: np.ndarray, sobol=False):
if x_all.shape[0] == 0:
raise ValueError('Set of discrete vectors cannot be empty!')
def _choice(n_choose, n_from, replace=True):
return self._choice(n_choose, n_from, replace=replace, sobol=sobol)
# Separate design vectors into groups
groups = self.group_design_vectors(x_all, is_act_all, is_cont_mask)
# Apply weights to the different groups
weights = np.array(self._get_group_weights(groups, is_act_all))
# Uniformly choose from which group to sample
if len(groups) == 1:
selected_groups = np.zeros((n_samples,), dtype=int)
else:
unit_weights = weights/np.sum(weights)
selected_groups = np.zeros((n_samples,), dtype=int)
selected_pos = np.linspace(0, 1, n_samples)
for cum_weight in np.cumsum(unit_weights)[:-1]:
selected_groups[selected_pos > cum_weight] += 1
x = []
is_active = []
has_x_cont = np.any(is_cont_mask)
i_x_sampled = np.ones((x_all.shape[0],), dtype=bool)
for i_grp in range(len(groups)):
i_x_tgt = np.where(selected_groups == i_grp)[0]
if len(i_x_tgt) == 0:
continue
i_x_group = groups[i_grp]
i_from_group = self._sample_discrete_from_group(
x_all[i_x_group, :], is_act_all[i_x_group, :], len(i_x_tgt), _choice, has_x_cont)
x_all_choose = i_x_group[i_from_group]
x.append(x_all[x_all_choose, :])
is_active.append(is_act_all[x_all_choose, :])
i_x_sampled[x_all_choose] = True
x = np.row_stack(x)
is_active = np.row_stack(is_active)
# Uniformly add discrete vectors if there are not enough (can happen if some groups are very small and there
# are no continuous dimensions)
if x.shape[0] < n_samples:
n_add = n_samples-x.shape[0]
x_available = x_all[~i_x_sampled, :]
is_act_available = is_act_all[~i_x_sampled, :]
if n_add < x_available.shape[0]:
i_from_group = _choice(n_add, x_available.shape[0], replace=False)
else:
i_from_group = np.arange(x_available.shape[0])
x = np.row_stack([x, x_available[i_from_group, :]])
is_active = np.row_stack([is_active, is_act_available[i_from_group, :]])
return x, is_active
def _sample_discrete_from_group(self, x_group: np.ndarray, is_act_group: np.ndarray, n_sel: int, choice_func,
has_x_cont: bool) -> np.ndarray:
# Get the number of design points to sample
n_in_group = x_group.shape[0]
n_sel = n_sel
i_x_selected = np.array([], dtype=int)
while n_sel >= n_in_group:
# If we have to sample a multiple of the available points or if we cannot sample duplicate points (because
# there are no continuous variables), return all points
if n_sel == n_in_group or not has_x_cont:
return np.concatenate([i_x_selected, np.arange(n_in_group)])
# Pre-select all points once
i_x_selected = np.concatenate([i_x_selected, np.arange(n_in_group)])
n_sel = n_sel-n_in_group
if n_sel == 1:
i_x_take = choice_func(1, n_in_group, replace=False)
return np.concatenate([i_x_selected, i_x_take])
# Randomly sample several times to get the best distribution of points
i_x_tries = []
metrics = []
for _ in range(self.n_iter):
i_x_try = choice_func(n_sel, n_in_group, replace=False)
i_x_tries.append(i_x_try)
x_try = x_group[i_x_try, :]
dist = distance.cdist(x_try, x_try, metric='cityblock')
np.fill_diagonal(dist, np.nan)
min_dist = np.nanmin(dist)
median_dist = np.nanmean(dist)
metrics.append((min_dist, median_dist))
# Get the distribution with max minimum distance and max mean distance
i_best = sorted(range(len(metrics)), key=metrics.__getitem__)[-1]
return np.concatenate([i_x_selected, i_x_tries[i_best]])
def group_design_vectors(self, x_all: np.ndarray, is_act_all: np.ndarray, is_cont_mask) -> List[np.ndarray]:
# Group by active design variables
is_active_unique, unique_indices = np.unique(is_act_all, axis=0, return_inverse=True)
return [np.where(unique_indices == i)[0] for i in range(len(is_active_unique))]
def _get_group_weights(self, groups: List[np.ndarray], is_act_all: np.ndarray) -> List[float]:
# Weight subgroups by nr of active variables
nr_active = np.sum(is_act_all, axis=1)
avg_nr_active = [np.sum(nr_active[group])/len(group) for group in groups]
return avg_nr_active
@staticmethod
def _sobol(n_samples, n_dims=None) -> np.ndarray:
"""
Sample using a Sobol sequence, which supposedly gives a better distribution of points in a hypercube.
More info: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.qmc.Sobol.html
"""
# Get the power of 2 for generating samples (generating a power of 2 gives points with the lowest discrepancy)
pow2 = int(np.ceil(np.log2(n_samples)))
# Sample points and only return the amount needed
global_rng = get_np_random_singleton()
x = Sobol(d=n_dims or 1, seed=global_rng).random_base2(m=pow2)
x = x[:n_samples, :]
return x[:, 0] if n_dims is None else x
@classmethod
def _choice(cls, n_choose, n_from, replace=True, sobol=False):
if sobol:
return cls._sobol_choice(n_choose, n_from, replace=replace)
return np.random.choice(n_from, n_choose, replace=replace)
@classmethod
def _sobol_choice(cls, n_choose, n_from, replace=True):
"""
Randomly choose n_choose from n_from values, optionally replacing (i.e. allow choosing values multiple times).
If n_choose > n_from
"""
if n_choose <= 0:
return np.zeros((0,), dtype=int)
# If replace (i.e. values can be chosen multiple times)
if replace:
# Generate unit samples
x_unit = cls._sobol(n_choose)
# Scale to nr of possible values and round
return np.round(x_unit*(n_from-.01)-.5).astype(int)
# If we cannot replace, we cannot choose more values than available
if n_choose > n_from:
raise ValueError(f'Nr of values to choose should be lower than available values: {n_choose} > {n_from}')
# Generate unit samples from total nr available
x_unit = cls._sobol(n_from)
# Get sorting arguments: this places each float value on an increasing integer scale
x_unit = x_unit.argsort()
# Only return the amount that we actually want to choose
return x_unit[:n_choose]
def __repr__(self):
return f'{self.__class__.__name__}()'
class LargeDuplicateElimination(DefaultDuplicateElimination):
"""
Duplicate elimination that can deal with a large amount of individuals in a population: instead of creating one big
n_pop x n_pop cdist matrix, it does so in batches, thereby staying fast and saving in memory at the same time.
"""
_n_per_batch = 200
def _do(self, pop, other, is_duplicate):
x = self.func(pop)
other = self.func(other) if other is not None else None
return self.eliminate(x, other, is_duplicate, self.epsilon)
@classmethod
def eliminate(cls, x, other=None, is_duplicate=None, epsilon=1e-16):
# Either compare x to itself or to another x
x = x.copy().astype(float)
if is_duplicate is None:
is_duplicate = np.zeros((x.shape[0],), dtype=bool)
to_itself = False
if other is None:
x_ = x
to_itself = True
else:
x_ = other.copy().astype(float)
# Determine how many batches we need
n_per_batch = cls._n_per_batch
nx = x.shape[0]
n = (x_.shape[0] - 1) if to_itself else x_.shape[0]
if n == 0:
return is_duplicate
n_batches = int(np.ceil(n / n_per_batch))
n_in_batch = np.ones((n_batches,), dtype=int)*n_per_batch
n_in_batch[-1] = n - (n_batches-1)*n_per_batch
for ib, n_batch in enumerate(n_in_batch):
i_compare_to = np.arange(n_batch)+ib*n_per_batch
i = i_compare_to[0]
# Only compare points in the population to other points that are not already marked as duplicate
non_dup = ~is_duplicate
x_check = x[i+1:, ][non_dup[i+1:], :] if to_itself else x[non_dup, :]
if x_check.shape[0] == 0:
break
# Do the comparison: the result is an n_x_check x n_i_compare_to boolean matrix
i_is_dup = distance.cdist(x_check, x_[i_compare_to, :], metric='cityblock') < epsilon
if to_itself:
# Expand to all indices from non-duplicate indices
i_is_dup_expanded = np.zeros((nx-i-1, n_batch), dtype=bool)
i_is_dup_expanded[non_dup[i+1:], :] = i_is_dup
# If we compare to ourselves, we will have a diagonal that is always true, and we should ignore anything
# above the triangle, otherwise the indices are off
i_is_dup_expanded[np.triu_indices(n_batch, k=1)] = False
# Mark as duplicate rows where any of the columns is true
is_duplicate[i+1:][np.any(i_is_dup_expanded, axis=1)] = True
else:
# Expand to all indices from non-duplicate indices
i_is_dup_expanded = np.zeros((nx, n_batch), dtype=bool)
i_is_dup_expanded[non_dup, :] = i_is_dup
# Mark as duplicate rows where any of the columns is true
is_duplicate[np.any(i_is_dup_expanded, axis=1)] = True
return is_duplicate
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/sampling.py
| 0.898905 | 0.452475 |
sampling.py
|
pypi
|
import logging
import numpy as np
import pandas as pd
from typing import *
import pymoo.core.variable as var
from pymoo.core.population import Population
from sb_arch_opt.problem import ArchOptProblemBase
try:
from hebo.design_space.param import Parameter
from hebo.design_space.design_space import DesignSpace
from hebo.optimizers.hebo import HEBO
from hebo.optimizers.general import GeneralBO
from hebo.optimizers.abstract_optimizer import AbstractOptimizer
HAS_HEBO = True
except ImportError:
HAS_HEBO = False
__all__ = ['HAS_HEBO', 'check_dependencies', 'HEBOArchOptInterface']
log = logging.getLogger('sb_arch_opt.hebo')
def check_dependencies():
if not HAS_HEBO:
raise ImportError(f'HEBO dependencies not installed: python setup.py install[hebo]')
class HEBOArchOptInterface:
"""
Interface class to HEBO algorithm.
"""
def __init__(self, problem: ArchOptProblemBase, n_init: int, seed: int = None):
check_dependencies()
self._problem = problem
self._n_init = n_init
self._optimizer = None
self._design_space = None
self._seed = seed
if seed is not None:
np.random.seed(seed)
@property
def problem(self):
return self._problem
@property
def n_batch(self):
n_batch = self._problem.get_n_batch_evaluate()
return n_batch if n_batch is not None else 1
@property
def design_space(self) -> 'DesignSpace':
if self._design_space is None:
hebo_var_defs = []
for i, var_def in enumerate(self._problem.des_vars):
name = f'x{i}'
if isinstance(var_def, var.Real):
hebo_var_defs.append({'name': name, 'type': 'num', 'lb': var_def.bounds[0], 'ub': var_def.bounds[1]})
elif isinstance(var_def, var.Integer):
hebo_var_defs.append({'name': name, 'type': 'int', 'lb': var_def.bounds[0], 'ub': var_def.bounds[1]})
elif isinstance(var_def, var.Binary):
hebo_var_defs.append({'name': name, 'type': 'bool'})
elif isinstance(var_def, var.Choice):
hebo_var_defs.append({'name': name, 'type': 'cat', 'categories': var_def.options})
else:
raise RuntimeError(f'Unsupported design variable type: {var_def!r}')
self._design_space = DesignSpace().parse(hebo_var_defs)
return self._design_space
@property
def optimizer(self) -> 'AbstractOptimizer':
if self._optimizer is None:
if self._problem.n_obj == 1 and self._problem.n_ieq_constr == 0:
self._optimizer = HEBO(self.design_space, model_name='gpy', rand_sample=self._n_init,
scramble_seed=self._seed)
else:
self._optimizer = GeneralBO(self.design_space, num_obj=self._problem.n_obj,
num_constr=self._problem.n_ieq_constr, rand_sample=self._n_init,
model_config={'num_epochs': 100})
return self._optimizer
@property
def pop(self) -> Population:
x = self._to_x(self.optimizer.X)
y: np.ndarray = self.optimizer.y
f = y[:, :self._problem.n_obj]
kwargs = {'X': x, 'F': f}
if self._problem.n_ieq_constr > 0:
kwargs['G'] = y[:, self._problem.n_obj:]
return Population.new(**kwargs)
def optimize(self, n_infill: int):
"""Run the optimization loop for n_infill infill points (on top on the initialization points)"""
n_total = self._n_init+n_infill
evaluated = 0
while evaluated < n_total:
x = self.ask()
out = self._problem.evaluate(x, return_as_dictionary=True)
x_eval = out['X']
f = out['F']
g = out['G'] if self._problem.n_ieq_constr > 0 else None
self.tell(x_eval, f, g)
evaluated += x_eval.shape[0]
def ask(self) -> np.ndarray:
"""Returns n_batch infill points"""
x_df = self.optimizer.suggest(n_suggestions=self.n_batch)
return self._to_x(x_df)
def tell(self, x: np.ndarray, f: np.ndarray, g: np.ndarray = None):
"""Updates optimizer with evaluated design points"""
y = f
if g is not None:
y = np.column_stack([f, g])
params: List['Parameter'] = self.design_space.paras.values()
x_df = pd.DataFrame({f'x{i}': param.inverse_transform(x[:, i]) for i, param in enumerate(params)})
self.optimizer.observe(x_df, y)
def _to_x(self, x_df: pd.DataFrame) -> np.ndarray:
params: List['Parameter'] = self.design_space.paras.values()
return np.column_stack([param.transform(x_df[f'x{i}']) for i, param in enumerate(params)])
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/hebo_interface/algo.py
| 0.819641 | 0.181444 |
algo.py
|
pypi
|
import numpy as np
import pymoo.core.variable as var
from pymoo.core.population import Population
from sb_arch_opt.problem import ArchOptProblemBase
try:
from ax import ParameterType, RangeParameter, ChoiceParameter, SearchSpace, Experiment, OptimizationConfig, \
Objective, MultiObjective, OutcomeConstraint, Metric, ComparisonOp, MultiObjectiveOptimizationConfig, \
Trial, Data
from ax.service.managed_loop import OptimizationLoop
from ax.modelbridge.dispatch_utils import choose_generation_strategy
HAS_BOTORCH = True
except ImportError:
HAS_BOTORCH = False
__all__ = ['AxInterface', 'check_dependencies']
def check_dependencies():
if not HAS_BOTORCH:
raise ImportError(f'BoTorch/Ax dependencies not installed: python setup.py install[botorch]')
class AxInterface:
"""
Class handling interfacing between ArchOptProblemBase and the Ax optimization loop, based on:
https://ax.dev/tutorials/gpei_hartmann_developer.html
Restart can be implemented based on:
- https://ax.dev/tutorials/generation_strategy.html#3B.-JSON-storage
- https://ax.dev/tutorials/gpei_hartmann_service.html#7.-Save-/-reload-optimization-to-JSON-/-SQL
Failed trails can be marked (as a primitive way of dealing with hidden constraints):
- https://ax.dev/tutorials/gpei_hartmann_service.html#Special-Cases
"""
def __init__(self, problem: ArchOptProblemBase):
check_dependencies()
self._problem = problem
def get_optimization_loop(self, n_init: int, n_infill: int, seed: int = None) -> 'OptimizationLoop':
experiment = self.get_experiment()
n_eval_total = n_init+n_infill
generation_strategy = choose_generation_strategy(
search_space=experiment.search_space,
experiment=experiment,
num_trials=n_eval_total,
num_initialization_trials=n_init,
max_parallelism_override=self._problem.get_n_batch_evaluate(),
)
return OptimizationLoop(
experiment=experiment,
evaluation_function=self.evaluate,
total_trials=n_eval_total,
generation_strategy=generation_strategy,
random_seed=seed,
)
def get_search_space(self) -> 'SearchSpace':
"""Gets the search space as defined by the underlying problem"""
parameters = []
for i, var_def in enumerate(self._problem.des_vars):
name = f'x{i}'
if isinstance(var_def, var.Real):
parameters.append(RangeParameter(
name=name, parameter_type=ParameterType.FLOAT, lower=var_def.bounds[0], upper=var_def.bounds[1]))
elif isinstance(var_def, var.Integer):
parameters.append(RangeParameter(
name=name, parameter_type=ParameterType.INT, lower=var_def.bounds[0], upper=var_def.bounds[1]))
elif isinstance(var_def, var.Binary):
parameters.append(ChoiceParameter(
name=name, parameter_type=ParameterType.INT, values=[0, 1], is_ordered=True))
elif isinstance(var_def, var.Choice):
parameters.append(ChoiceParameter(
name=name, parameter_type=ParameterType.INT, values=var_def.options, is_ordered=False))
else:
raise RuntimeError(f'Unsupported design variable type: {var_def!r}')
return SearchSpace(parameters)
def get_optimization_config(self) -> 'OptimizationConfig':
"""Gets the optimization config (objectives and constraints) as defined by the underlying problem"""
if self._problem.n_eq_constr > 0:
raise RuntimeError('Currently equality constraints are not supported!')
constraints = [OutcomeConstraint(Metric(name=f'g{i}'), ComparisonOp.LEQ, bound=0., relative=False)
for i in range(self._problem.n_ieq_constr)]
if self._problem.n_obj == 1:
return OptimizationConfig(
objective=Objective(Metric(name='f0'), minimize=True),
outcome_constraints=constraints,
)
objective = MultiObjective(objectives=[
Objective(Metric(name=f'f{i}'), minimize=True) for i in range(self._problem.n_obj)])
return MultiObjectiveOptimizationConfig(
objective=objective,
outcome_constraints=constraints,
)
def get_experiment(self) -> 'Experiment':
return Experiment(
name=repr(self._problem),
search_space=self.get_search_space(),
optimization_config=self.get_optimization_config(),
)
def evaluate(self, parameterization: dict, _=None) -> dict:
x = np.array([[parameterization[f'x{i}'] for i in range(self._problem.n_var)]])
out = self._problem.evaluate(x, return_as_dictionary=True)
metrics = {}
for i in range(self._problem.n_obj):
metrics[f'f{i}'] = out['F'][0, i]
for i in range(self._problem.n_ieq_constr):
metrics[f'g{i}'] = out['G'][0, i]
return metrics
def get_population(self, opt_loop: 'OptimizationLoop') -> Population:
x, f, g = [], [], []
data_by_trial = opt_loop.experiment.data_by_trial
trial: 'Trial'
for trial in opt_loop.experiment.trials.values():
x.append([trial.arm.parameters[f'x{i}'] for i in range(self._problem.n_var)])
data: 'Data' = list(data_by_trial[trial.index].values())[0]
values = data.df.set_index('metric_name')['mean']
f.append([values[f'f{i}'] for i in range(self._problem.n_obj)])
g.append([values[f'g{i}'] for i in range(self._problem.n_ieq_constr)])
kwargs = {'X': np.array(x), 'F': np.array(f)}
if self._problem.n_ieq_constr > 0:
kwargs['G'] = np.array(g)
return Population.new(**kwargs)
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/botorch_interface/algo.py
| 0.715523 | 0.306268 |
algo.py
|
pypi
|
import os
import logging
import pathlib
import numpy as np
from typing import *
import pymoo.core.variable as var
from sb_arch_opt.util import capture_log
from pymoo.core.population import Population
from sb_arch_opt.problem import ArchOptProblemBase
# https://github.com/explosion/spaCy/issues/7664#issuecomment-825501808
# Needed to solve "Fatal Python error: aborted"!
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
try:
from trieste.bayesian_optimizer import BayesianOptimizer, TrainableProbabilisticModelType, FrozenRecord, Record
from trieste.observer import Observer, MultiObserver, Dataset, OBJECTIVE
from trieste.space import SearchSpace, Box, DiscreteSearchSpace, TaggedProductSearchSpace
from trieste.models.gpflow import build_gpr, GaussianProcessRegression, build_vgp_classifier, \
VariationalGaussianProcess
from trieste.models.optimizer import BatchOptimizer
from trieste.models.interfaces import TrainableProbabilisticModel, ProbabilisticModel
from trieste.acquisition.rule import AcquisitionRule, EfficientGlobalOptimization
from trieste.acquisition import ProbabilityOfFeasibility, ExpectedConstrainedImprovement, \
ExpectedHypervolumeImprovement, ExpectedConstrainedHypervolumeImprovement, ExpectedImprovement, Product, \
SingleModelAcquisitionBuilder
import tensorflow as tf
from dill import UnpicklingError
HAS_TRIESTE = True
except ImportError:
class BayesianOptimizer:
pass
class SingleModelAcquisitionBuilder:
pass
HAS_TRIESTE = False
OBJECTIVE = 'OBJECTIVE'
__all__ = ['HAS_TRIESTE', 'check_dependencies', 'ArchOptBayesianOptimizer', 'OBJECTIVE', 'CONSTR_PREFIX',
'ProbabilityOfValidity']
log = logging.getLogger('sb_arch_opt.trieste')
CONSTR_PREFIX = 'G'
FAILED = 'FAILED'
def check_dependencies():
if not HAS_TRIESTE:
raise ImportError(f'Trieste dependencies not installed: python setup.py install[trieste]')
class ArchOptBayesianOptimizer(BayesianOptimizer):
"""
Bayesian optimization loop controller with some extra helper functions.
Use the `run_optimization` function to run the DOE and infill loops.
Use `initialize_from_previous` to initialize the optimization state from previously saved results.
Optimization loop: https://secondmind-labs.github.io/trieste/1.0.0/notebooks/expected_improvement.html
Restart: https://secondmind-labs.github.io/trieste/1.0.0/notebooks/recovering_from_errors.html
Constraints: https://secondmind-labs.github.io/trieste/1.0.0/notebooks/inequality_constraints.html
Multi-objective: https://secondmind-labs.github.io/trieste/1.0.0/notebooks/multi_objective_ehvi.html
Hidden constraints: https://secondmind-labs.github.io/trieste/1.0.0/notebooks/failure_ego.html
Ask-tell: https://secondmind-labs.github.io/trieste/1.0.0/notebooks/ask_tell_optimization.html
"""
def __init__(self, problem: ArchOptProblemBase, n_init: int, n_infill: int, pof=.5,
rule: 'AcquisitionRule' = None, seed: int = None):
check_dependencies()
self._problem = problem
self.pof = pof
self._rule = rule
self.n_init = n_init
self.n_infill = n_infill
self.eval_might_fail = problem.might_have_hidden_constraints()
observer = ArchOptObserver(self.evaluate)
search_space = self.get_search_space(problem)
super().__init__(observer, search_space)
self._results_folder = None
self._datasets = None
self._models = None
self._state = None
if seed is not None:
np.random.seed(seed)
tf.random.set_seed(seed)
@property
def search_space(self):
return self._search_space
@property
def observer(self) -> 'MultiObserver':
return self._observer
@property
def rule(self) -> 'AcquisitionRule':
if self._rule is None:
self._rule = self.get_acquisition_rule(pof=self.pof)
return self._rule
@rule.setter
def rule(self, rule: 'AcquisitionRule'):
self._rule = rule
@property
def is_constrained(self):
return self._problem.n_ieq_constr > 0
def initialize_from_previous(self, results_folder: str):
capture_log()
# Load from problem state
population = self._problem.load_previous_results(results_folder)
if population is not None:
self._datasets = datasets = self._to_datasets(population)
self._models = self.get_models(datasets)
self._state = None
log.info(f'Previous results loaded from problem results: {len(population)} design points')
return
# Load from optimizer state
state_path = self.get_state_path(results_folder)
if os.path.exists(state_path):
try:
results = FrozenRecord(pathlib.Path(state_path)).load()
except UnpicklingError:
log.exception(f'Could not load previous state from: {state_path}')
return
self._datasets = datasets = results.datasets
self._models = results.models
self._state = results.acquisition_state
log.info(f'Previous results loaded from optimizer state: {self._get_n_points(datasets)} design points')
return
log.info('No previous results found')
def run_optimization(self, results_folder=None) -> 'Record':
"""Runs a full optimization, including initial DOE"""
capture_log()
self._results_folder = results_folder
# Check how many points we already have available
n_available = 0
if self._datasets is not None:
n_available = self._get_n_points(self._datasets)
log.info(f'Starting optimization with {n_available} points already available')
# Run (part of) DOE if needed
if n_available < self.n_init:
log.info(f'Running DOE: {self.n_init - n_available} points ({self.n_init} total)')
datasets = self._run_doe(self.n_init - n_available)
models = self.get_models(datasets)
self._exec_callback(datasets, models)
else:
log.info(f'Skipping DOE, enough points available: {n_available} >= {self.n_init}')
datasets = self._datasets
models = self._models
# Run (part of) optimization
n_available = self._get_n_points(datasets)
if n_available < self.n_init+self.n_infill:
n_infill = self.n_infill - (n_available-self.n_init)
log.info(f'Running optimization: {n_infill} infill points')
opt_results = self.optimize(
n_infill, datasets, models, self.rule, self._state,
early_stop_callback=self._exec_callback, track_state=False)
record = opt_results.final_result.unwrap()
self._datasets = record.datasets
self._models = record.models
self._state = record.acquisition_state
else:
record = Record(datasets, models, acquisition_state=self._state)
log.info(f'Skipping infill, enough points available: {n_available} >= {self.n_init}+{self.n_infill}')
# Store final problem results
if self._results_folder is not None:
self._exec_callback(self._datasets, self._models, self._state)
return record
def _exec_callback(self, datasets, models, acquisition_state=None):
self._datasets = datasets
self._models = models
self._state = acquisition_state
# Store intermediate results if requested
if self._results_folder is not None:
# Store optimizer state
Record(datasets, models, acquisition_state).save(self.get_state_path(self._results_folder))
# Store problem state
self._problem.store_results(self._results_folder)
return False
def _run_doe(self, n: int):
return self.observer(self.search_space.sample(n))
def get_models(self, datasets):
# https://secondmind-labs.github.io/trieste/1.0.0/notebooks/inequality_constraints.html#Modelling-the-two-functions
search_space = self.search_space
models = {}
for tag, dataset in datasets.items():
# https://secondmind-labs.github.io/trieste/1.0.0/notebooks/failure_ego.html#Build-GPflow-models
if tag == FAILED:
classifier = build_vgp_classifier(dataset, search_space, noise_free=True)
models[tag] = VariationalGaussianProcess(
classifier, BatchOptimizer(tf.optimizers.Adam(1e-3)), use_natgrads=True)
continue
# https://secondmind-labs.github.io/trieste/1.0.0/notebooks/expected_improvement.html#Model-the-objective-function
gpr = build_gpr(dataset, search_space, likelihood_variance=1e-7)
models[tag] = GaussianProcessRegression(gpr, num_kernel_samples=100)
return models
@staticmethod
def _get_n_points(datasets: Mapping[Hashable, 'Dataset']) -> int:
if FAILED in datasets:
return datasets[FAILED].query_points.shape[0]
if OBJECTIVE not in datasets:
return 0
return datasets[OBJECTIVE].query_points.shape[0]
@staticmethod
def get_state_path(results_folder):
return os.path.join(results_folder, 'trieste_state')
def get_acquisition_rule(self, pof=.5) -> 'AcquisitionRule':
"""
Builds the acquisition rule based on whether the problem is single- or multi-objective and constrained or not:
https://secondmind-labs.github.io/trieste/1.0.0/notebooks/inequality_constraints.html#Define-the-acquisition-process
https://secondmind-labs.github.io/trieste/1.0.0/notebooks/multi_objective_ehvi.html#Define-the-acquisition-function
"""
if self._problem.n_eq_constr > 0:
raise RuntimeError('Trieste currently does not support equality constraints')
if self.is_constrained:
# Reduce the PoF rules into one
# https://secondmind-labs.github.io/trieste/1.0.0/notebooks/inequality_constraints.html#Constrained-optimization-with-more-than-one-constraint
pof_builders = [ProbabilityOfFeasibility(threshold=pof).using(f'{CONSTR_PREFIX}{ig}')
for ig in range(self._problem.n_ieq_constr)]
pof_builder = pof_builders[0] if len(pof_builders) == 1 else Product(*pof_builders)
if self._problem.n_obj == 1:
acq_builder = ExpectedConstrainedImprovement(OBJECTIVE, pof_builder)
else:
acq_builder = ExpectedConstrainedHypervolumeImprovement(OBJECTIVE, pof_builder)
else:
if self._problem.n_obj == 1:
acq_builder = ExpectedImprovement().using(OBJECTIVE)
else:
acq_builder = ExpectedHypervolumeImprovement().using(OBJECTIVE)
# Deal with hidden constraints in the acquisition function
if self.eval_might_fail:
pov = ProbabilityOfValidity().using(FAILED)
acq_builder = Product(acq_builder, pov)
return EfficientGlobalOptimization(acq_builder)
@staticmethod
def get_search_space(problem: ArchOptProblemBase) -> 'SearchSpace':
box_buffer = []
search_space: Optional['SearchSpace'] = None
def _append_box():
nonlocal box_buffer
if len(box_buffer) == 0:
return
bounds = np.array(box_buffer)
_append_space(Box(lower=tf.constant(bounds[:, 0], dtype=tf.float64),
upper=tf.constant(bounds[:, 1], dtype=tf.float64)))
box_buffer = []
def _append_space(space: 'SearchSpace'):
nonlocal search_space
if search_space is None:
search_space = space
else:
search_space = search_space * space # Creates a TaggedProductSearchSpace
for i, var_def in enumerate(problem.des_vars):
# We can have multiple real dimensions in one part of the design space, so we accumulate before actually
# creating a Box (a continuous search space)
if isinstance(var_def, var.Real):
box_buffer.append(var_def.bounds)
continue
# Until there is a discrete dimension, which we add directly
_append_box()
if isinstance(var_def, var.Integer):
points = np.arange(var_def.bounds[0], var_def.bounds[1]+1, dtype=int)
elif isinstance(var_def, var.Binary):
points = np.array([0, 1])
elif isinstance(var_def, var.Choice):
points = np.arange(0, len(var_def.options), dtype=int)
else:
raise RuntimeError(f'Unsupported design variable type: {var_def!r}')
discrete_search_space = DiscreteSearchSpace(tf.constant(np.array([points]).T, dtype=tf.float64))
_append_space(discrete_search_space)
_append_box()
if search_space is None:
raise RuntimeError('Problem contains no design variables!')
return search_space
def evaluate(self, x: 'tf.Tensor') -> Dict[str, 'Dataset']:
out = self._problem.evaluate(x.numpy(), return_as_dictionary=True)
return self._process_evaluation_results(out)
def _to_datasets(self, population: Population) -> Dict[str, 'Dataset']:
return self._process_evaluation_results(population)
def _process_evaluation_results(self, pop_or_dict: Union[dict, Population]) -> Dict[str, 'Dataset']:
is_constrained = self.is_constrained
# Separate failed evaluations (hidden constraints)
is_failed = self._problem.get_failed_points(pop_or_dict)
is_ok = ~is_failed
x_all = pop_or_dict.get('X')
x_out = x_all[is_ok, :]
f = pop_or_dict.get('F')[is_ok, :]
g = pop_or_dict.get('G')[is_ok, :] if is_constrained else None
x_ts = tf.constant(x_out, dtype=tf.float64)
datasets = {
OBJECTIVE: Dataset(x_ts, tf.constant(f, dtype=tf.float64)),
FAILED: Dataset(tf.constant(x_all, dtype=tf.float64), tf.cast(is_failed[:, None], dtype=tf.float64)),
}
if is_constrained:
for ig in range(self._problem.n_ieq_constr):
datasets[f'{CONSTR_PREFIX}{ig}'] = Dataset(x_ts, tf.constant(g[:, [ig]], dtype=tf.float64))
return datasets
def to_population(self, datasets: Dict[Hashable, 'Dataset']) -> Population:
obj_dataset = datasets[OBJECTIVE]
x = obj_dataset.query_points.numpy()
kwargs = {
'X': x,
'F': obj_dataset.observations.numpy(),
}
if self.is_constrained:
g = np.zeros((x.shape[0], self._problem.n_ieq_constr))
for ig in range(self._problem.n_ieq_constr):
g[:, ig] = datasets[f'{CONSTR_PREFIX}{ig}'].observations.numpy()
return Population.new(**kwargs)
class ArchOptObserver:
"""
The observer function that evaluates each architecture, according to the tagged observer pattern:
https://secondmind-labs.github.io/trieste/1.0.0/notebooks/inequality_constraints.html
Support for failed evaluations based on:
https://secondmind-labs.github.io/trieste/1.0.0/notebooks/failure_ego.html#Define-the-data-sets
Class needed to prevent overflow in BayesianOptimizer.__repr__
"""
def __init__(self, func):
self._func = func
def __call__(self, x: 'tf.Tensor') -> Dict[str, 'Dataset']:
return self._func(x)
def __repr__(self):
return f'{self.__class__.__name__}()'
class ProbabilityOfValidity(SingleModelAcquisitionBuilder):
"""
Acquisition function for dealing with failed regions (hidden constraints):
https://secondmind-labs.github.io/trieste/1.0.0/notebooks/failure_ego.html#Create-a-custom-acquisition-function
"""
def prepare_acquisition_function(self, model, dataset=None):
def acquisition(at):
mean, _ = model.predict_y(tf.squeeze(at, -2))
return mean
return acquisition
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/trieste_interface/algo.py
| 0.882231 | 0.276794 |
algo.py
|
pypi
|
import timeit
import numpy as np
from typing import *
from enum import Enum
from scipy.stats import norm
from scipy.special import ndtr
from sb_arch_opt.problem import ArchOptProblemBase
from sb_arch_opt.algo.arch_sbo.hc_strategy import HiddenConstraintStrategy
from pymoo.core.problem import Problem
from pymoo.core.population import Population
from pymoo.core.algorithm import filter_optimum
from pymoo.util.normalization import Normalization
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from pymoo.algorithms.moo.nsga2 import RankAndCrowdingSurvival, calc_crowding_distance
__all__ = ['SurrogateInfill', 'FunctionEstimateInfill', 'ConstrainedInfill', 'FunctionEstimateConstrainedInfill',
'ExpectedImprovementInfill', 'MinVariancePFInfill', 'ConstraintStrategy', 'MeanConstraintPrediction',
'ProbabilityOfFeasibility', 'ProbabilityOfImprovementInfill', 'LowerConfidenceBoundInfill',
'MinimumPoIInfill', 'EnsembleInfill', 'IgnoreConstraints', 'get_default_infill', 'HCInfill',
'ConstraintAggregation']
try:
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.surrogate_models.krg_based import KrgBased
except ImportError:
pass
def get_default_infill(problem: ArchOptProblemBase, n_parallel: int = None, min_pof: float = None,
g_aggregation: 'ConstraintAggregation' = None) -> Tuple['ConstrainedInfill', int]:
"""
Get the default infill criterion according to the following logic:
- If evaluations can be run in parallel:
- Single-objective: Ensemble of EI, LCB, PoI with n_batch = n_parallel
- Multi-objective: Ensemble of MPoI, MEPoI with n_batch = n_parallel
- If no parallelization possible:
- Single-objective
- Continuous: Mean function estimate
- Mixed-discrete: Ensemble of EI, LCB, PoI with n_batch = 1
- Multi-objective: Ensemble of MPoI, MEPoI with n_batch = 1
- Set Probability of Feasibility as constraint handling technique if min_pof != .5, otherwise use g-mean prediction
Returns the infill and recommended infill batch size.
"""
# Determine number of evaluations that can be run in parallel
if n_parallel is None:
n_parallel = problem.get_n_batch_evaluate()
if n_parallel is None:
n_parallel = 1
so_ensemble = [ExpectedImprovementInfill(), LowerConfidenceBoundInfill(), ProbabilityOfImprovementInfill()]
mo_ensemble = [MinimumPoIInfill(), MinimumPoIInfill(euclidean=True)]
def _get_infill():
# Use the ensemble infill if parallel
if n_parallel > 1:
return EnsembleInfill(so_ensemble if problem.n_obj == 1 else mo_ensemble), n_parallel
# Ensemble infill with 1 per iteration if multi-objective
if problem.n_obj > 1:
return EnsembleInfill(mo_ensemble), 1
# Mean function estimate if continuous single-objective
is_continuous = np.all(problem.is_cont_mask)
if is_continuous:
return FunctionEstimateConstrainedInfill(), 1
# Single-objective ensemble if mixed-discrete
return EnsembleInfill(so_ensemble), 1
# Get infill and set constraint handling technique
infill, n_batch = _get_infill()
if min_pof is not None and min_pof != .5:
infill.constraint_strategy = ProbabilityOfFeasibility(min_pof=min_pof, aggregation=g_aggregation)
else:
infill.constraint_strategy = MeanConstraintPrediction(aggregation=g_aggregation)
return infill, n_batch
class SurrogateInfill:
"""Base class for surrogate infill criteria"""
_exclude = ['surrogate_model']
def __init__(self):
self.problem: Optional[Problem] = None
self.surrogate_model: Optional[Union['SurrogateModel', 'KrgBased']] = None
self.normalization: Optional[Normalization] = None
self.n_obj = 0
self.n_constr = 0
self.n_f_ic = None
self.x_train = None
self.is_active_train = None
self.y_train = None
self.f_infill_log = []
self.g_infill_log = []
self.n_eval_infill = 0
self.time_eval_infill = 0.
def __getstate__(self):
state = self.__dict__.copy()
for key in self._exclude:
state[key] = None
return state
@property
def needs_variance(self):
return False
def get_g_training_set(self, g: np.ndarray) -> np.ndarray:
return g
def set_samples(self, x_train: np.ndarray, is_active_train: np.ndarray, y_train: np.ndarray):
self.x_train = x_train
self.is_active_train = is_active_train
self.y_train = y_train
def predict(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
try:
kwargs = {'is_acting': is_active.astype(bool)} if self.surrogate_model.supports['x_hierarchy'] else {}
y = self.surrogate_model.predict_values(self.normalization.forward(x), **kwargs)
except FloatingPointError:
y = np.zeros((x.shape[0], self.surrogate_model.ny))*np.nan
return self._split_f_g(y)
def predict_variance(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
try:
kwargs = {'is_acting': is_active.astype(bool)} if self.surrogate_model.supports['x_hierarchy'] else {}
y_var = self.surrogate_model.predict_variances(self.normalization.forward(x), **kwargs)
except FloatingPointError:
y_var = np.zeros((x.shape[0], self.surrogate_model.ny))*np.nan
return self._split_f_g(y_var)
def _split_f_g(self, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
if self.n_constr > 0:
return y[:, :self.n_obj], y[:, self.n_obj:]
return y[:, :self.n_obj], np.zeros((y.shape[0], 0))
def initialize(self, problem: Problem, surrogate_model: 'SurrogateModel', normalization: Normalization):
self.problem = problem
self.n_obj = problem.n_obj
self.n_constr = problem.n_constr
self.surrogate_model = surrogate_model
self.normalization = normalization
self._initialize()
self.n_f_ic = self.get_n_infill_objectives()
def select_infill_solutions(self, population: Population, infill_problem: Problem, n_infill) -> Population:
"""Select infill solutions from resulting population using rank and crowding selection (from NSGA2) algorithm.
This method can be overwritten to implement a custom selection strategy."""
# If there is only one objective, select the best point to prevent selecting duplicate points
if self.n_f_ic == 1:
return filter_optimum(population, least_infeasible=True)
survival = RankAndCrowdingSurvival()
return survival.do(infill_problem, population, n_survive=n_infill)
@staticmethod
def get_pareto_front(f: np.ndarray) -> np.ndarray:
"""Get the non-dominated set of objective values (the Pareto front)."""
i_non_dom = NonDominatedSorting().do(f, only_non_dominated_front=True)
return np.copy(f[i_non_dom, :])
def reset_infill_log(self):
self.f_infill_log = []
self.g_infill_log = []
self.n_eval_infill = 0
self.time_eval_infill = 0.
def evaluate(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Evaluate the surrogate infill objectives (and optionally constraints). Use the predict and predict_variance
methods to query the surrogate model on its objectives and constraints."""
s = timeit.default_timer()
f_infill, g_infill = self._evaluate(x, is_active)
self.time_eval_infill += timeit.default_timer()-s
self.f_infill_log.append(f_infill)
self.g_infill_log.append(g_infill)
self.n_eval_infill += x.shape[0]
return f_infill, g_infill
def _initialize(self):
pass
def get_n_infill_objectives(self) -> int:
raise NotImplementedError
def get_n_infill_constraints(self) -> int:
raise NotImplementedError
def _evaluate(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Evaluate the surrogate infill objectives (and optionally constraints). Use the predict and predict_variance
methods to query the surrogate model on its objectives and constraints."""
raise NotImplementedError
class FunctionEstimateInfill(SurrogateInfill):
"""Infill that directly uses the underlying surrogate model prediction."""
def get_n_infill_objectives(self) -> int:
return self.problem.n_obj
def get_n_infill_constraints(self) -> int:
return self.problem.n_constr
def _evaluate(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, Optional[np.ndarray]]:
f, g = self.predict(x, is_active)
return f, g
class ConstraintAggregation(Enum):
NONE = 0 # No aggregation
ELIMINATE = 1 # Automatically eliminate non-relevant
AGGREGATE = 2 # Aggregate all into 1
class ConstraintStrategy:
"""
Base class for a strategy for dealing with design constraints.
Optionally enables constraint aggregation (max) or elimination (only train models for constraints where for at least
one design point it is violated and all others are satisfied).
"""
def __init__(self, aggregation: ConstraintAggregation = None):
self.problem: Optional[Problem] = None
self.n_trained_g = None
self.aggregation = ConstraintAggregation.NONE if aggregation is None else aggregation
def initialize(self, problem: Problem):
self.problem = problem
def get_g_training_set(self, g: np.ndarray) -> np.ndarray:
# Eliminate constraints that are only violated when at least one other constraint is also violated
if self.aggregation == ConstraintAggregation.ELIMINATE:
g_ref = g
while g_ref.shape[1] > 1:
for i_g in range(g_ref.shape[1]):
is_violated = g_ref[:, i_g] > 0
g_ref_other = np.delete(g_ref, i_g, axis=1)
# No need to train GP if this constraint is never violated
if not np.any(is_violated):
break
all_other_satisfied = np.all(g_ref_other <= 0, axis=1)
i_g_only_active = is_violated & all_other_satisfied
if not np.any(i_g_only_active):
break
else:
break
g_ref = g_ref_other
return g_ref
# Optionally aggregate constraints by taking the maximum value
if self.aggregation == ConstraintAggregation.AGGREGATE:
return np.array([np.max(g, axis=1)]).T
return g
def set_samples(self, x_train: np.ndarray, y_train: np.ndarray):
self.n_trained_g = n_trained_g = y_train.shape[1]-self.problem.n_obj
n_constr = self.problem.n_ieq_constr
if n_trained_g == 0 and n_constr != 0:
raise RuntimeError(f'Expecting at least one trained constraint model ({n_constr}), received 0')
elif n_constr > 0 and (n_trained_g == 0 or n_trained_g > n_constr):
raise RuntimeError(f'Expecting between 1 and {n_constr} constraint models, received {n_trained_g}')
self._set_samples(x_train, y_train)
def _set_samples(self, x_train: np.ndarray, y_train: np.ndarray):
pass
def get_n_infill_constraints(self) -> int:
return self.n_trained_g
def evaluate(self, x: np.ndarray, g: np.ndarray, g_var: np.ndarray) -> np.ndarray:
"""Evaluate the infill constraint function(s) given x and predicted g and g_var"""
raise NotImplementedError
class MeanConstraintPrediction(ConstraintStrategy):
"""Simple use the mean prediction of the constraint functions as the infill constraint"""
def evaluate(self, x: np.ndarray, g: np.ndarray, g_var: np.ndarray) -> np.ndarray:
return g
class ProbabilityOfFeasibility(ConstraintStrategy):
"""
Uses a lower limit on the Probability of Feasibility (PoF) as the infill constraint.
PoF(x) = Phi(-y(x)/sqrt(s(x)))
where
- Phi is the cumulative distribution function of the normal distribution
- y(x) the surrogate model estimate
- s(x) the surrogate model variance estimate
Implementation based on discussion in:
Schonlau, M., "Global Versus Local Search in Constrained Optimization of Computer Models", 1998,
10.1214/lnms/1215456182
"""
def __init__(self, min_pof: float = None, aggregation: ConstraintAggregation = None):
if min_pof is None:
min_pof = .5
self.min_pof = min_pof
super().__init__(aggregation=aggregation)
def evaluate(self, x: np.ndarray, g: np.ndarray, g_var: np.ndarray) -> np.ndarray:
pof = self._pof(g, g_var)
return self.min_pof - pof
@staticmethod
def _pof(g: np.ndarray, g_var: np.ndarray) -> np.ndarray:
pof = norm.cdf(-g/np.sqrt(g_var))
is_nan_mask = np.isnan(pof)
pof[is_nan_mask & (g <= 0.)] = 1.
pof[is_nan_mask & (g > 0.)] = 0.
return pof
class ConstrainedInfill(SurrogateInfill):
"""Base class for an infill criterion with a constraint handling strategy"""
def __init__(self, constraint_strategy: ConstraintStrategy = None, min_pof: float = None):
if constraint_strategy is None:
if min_pof is not None:
constraint_strategy = ProbabilityOfFeasibility(min_pof=min_pof)
else:
constraint_strategy = MeanConstraintPrediction()
self.constraint_strategy = constraint_strategy
super(ConstrainedInfill, self).__init__()
def _initialize(self):
self.constraint_strategy.initialize(self.problem)
def get_g_training_set(self, g: np.ndarray) -> np.ndarray:
return self.constraint_strategy.get_g_training_set(g)
def set_samples(self, x_train: np.ndarray, is_active_train: np.ndarray, y_train: np.ndarray):
super().set_samples(x_train, is_active_train, y_train)
self.constraint_strategy.set_samples(x_train, y_train)
@property
def needs_variance(self):
return True
def get_n_infill_constraints(self) -> int:
return self.constraint_strategy.get_n_infill_constraints()
def _evaluate(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, Optional[np.ndarray]]:
f, g = self.predict(x, is_active)
f_var, g_var = self.predict_variance(x, is_active)
# Apply constraint handling strategy
g_infill = g
if self.n_constr > 0:
g_infill = self.constraint_strategy.evaluate(x, g, g_var)
f_infill = self.evaluate_f(f, f_var)
return f_infill, g_infill
def get_n_infill_objectives(self) -> int:
raise NotImplementedError
def evaluate_f(self, f_predict: np.ndarray, f_var_predict: np.ndarray) -> np.ndarray:
raise NotImplementedError
class FunctionEstimateConstrainedInfill(ConstrainedInfill):
"""Probability of Feasibility combined with direct function estimate for the objectives."""
def get_n_infill_objectives(self) -> int:
return self.problem.n_obj
def evaluate_f(self, f_predict: np.ndarray, f_var_predict: np.ndarray) -> np.ndarray:
return f_predict
class ExpectedImprovementInfill(ConstrainedInfill):
"""
The Expected Improvement (EI) naturally balances exploitation and exploration by representing the expected amount
of improvement at some point taking into accounts its probability of improvement.
EI(x) = (f_min-y(x)) * Phi((f_min - y(x))/s(x)) + s(x) * phi((f_min - y(x)) / s(x))
where
- f_min is the current best point (real)
- y(x) the surrogate model estimate
- s(x) the surrogate model variance estimate
- Phi is the cumulative distribution function of the normal distribution
- phi is the probability density function of the normal distribution
Implementation based on:
Jones, D.R., "Efficient Global Optimization of Expensive Black-Box Functions", 1998, 10.1023/A:1008306431147
"""
def get_n_infill_objectives(self) -> int:
return self.problem.n_obj
def evaluate_f(self, f_predict: np.ndarray, f_var_predict: np.ndarray) -> np.ndarray:
return self._evaluate_f_ei(f_predict, f_var_predict, self.y_train[:, :f_predict.shape[1]])
@classmethod
def _evaluate_f_ei(cls, f: np.ndarray, f_var: np.ndarray, f_current: np.ndarray) -> np.ndarray:
# Normalize current and predicted objectives
f_pareto = cls.get_pareto_front(f_current)
nadir_point, ideal_point = np.max(f_pareto, axis=0), np.min(f_pareto, axis=0)
nadir_point[nadir_point == ideal_point] = 1.
f_pareto_norm = (f_pareto-ideal_point)/(nadir_point-ideal_point)
f_norm, f_var_norm = cls._normalize_f_var(f, f_var, nadir_point, ideal_point)
# Get EI for each point using closest point in the Pareto front
f_ei = np.empty(f.shape)
for i in range(f.shape[0]):
i_par_closest = np.argmin(np.sum((f_pareto_norm-f_norm[i, :])**2, axis=1))
f_par_min = f_pareto_norm[i_par_closest, :]
ei = cls._ei(f_par_min, f_norm[i, :], f_var_norm[i, :])
ei[ei < 0.] = 0.
f_ei[i, :] = 1.-ei
return f_ei
@staticmethod
def _normalize_f_var(f: np.ndarray, f_var: np.ndarray, nadir_point, ideal_point):
f_norm = (f-ideal_point)/(nadir_point-ideal_point)
f_var_norm = f_var/((nadir_point-ideal_point+1e-30)**2)
return f_norm, f_var_norm
@staticmethod
def _ei(f_min: np.ndarray, f: np.ndarray, f_var: np.ndarray) -> np.ndarray:
dy = f_min-f
ei = dy*norm.cdf(dy/np.sqrt(f_var)) + f_var*norm.pdf(dy/np.sqrt(f_var))
is_nan_mask = np.isnan(ei)
ei[is_nan_mask & (dy > 0.)] = 1.
ei[is_nan_mask & (dy <= 0.)] = 0.
return ei
class MinVariancePFInfill(FunctionEstimateConstrainedInfill):
"""
Minimization of the Variance of Kriging-Predicted Front (MVPF).
This works by first finding a new Pareto front directly using the predicted function value, and then selecting
solutions with the highest variance for infill. This way, search is performed near the Pareto front, but with a
high potential for exploration.
Implementation based on:
dos Passos, A.G., "Multi-Objective Optimization with Kriging Surrogates Using 'moko'", 2018, 10.1590/1679-78254324
"""
def select_infill_solutions(self, population: Population, infill_problem: Problem, n_infill) -> Population:
# Get Pareto front and associated design points
f = population.get('F')
i_pf = self.get_i_pareto_front(f)
pop_pf = population[i_pf]
x_pf = pop_pf.get('X')
is_active_pf = pop_pf.get('is_active').astype(bool)
g_pf = pop_pf.get('G')
# Get variances
f_var, _ = self.predict_variance(x_pf, is_active_pf)
# Select points with highest variances
f_std_obj = 1.-np.sqrt(f_var)
survival = RankAndCrowdingSurvival()
pop_var = Population.new(X=x_pf, F=f_std_obj, G=g_pf)
i_select = survival.do(infill_problem, pop_var, n_survive=n_infill, return_indices=True)
return pop_pf[i_select]
@staticmethod
def get_i_pareto_front(f: np.ndarray) -> np.ndarray:
"""Get the non-dominated set of objective values (the Pareto front)."""
return NonDominatedSorting().do(f, only_non_dominated_front=True)
class ProbabilityOfImprovementInfill(ConstrainedInfill):
"""
Probability of Improvement represents the probability that some point will be better than the current best estimate
with some offset:
PoI(x) = Phi((T - y(x))/sqrt(s(x)))
where
- Phi is the cumulative distribution function of the normal distribution
- T is the improvement target (current best estimate minus some offset)
- y(x) the surrogate model estimate
- s(x) the surrogate model variance estimate
PoI was developed for single-objective optimization, and because of the use of the minimum current objective value,
it tends towards suggesting improvement points only at the edges of the Pareto front. It has been modified to
evaluate the PoI with respect to the closest Pareto front point instead.
Implementation based on:
Hawe, G.I., "An Enhanced Probability of Improvement Utility Function for Locating Pareto Optimal Solutions", 2007
"""
def __init__(self, f_min_offset: float = 0., **kwargs):
self.f_min_offset = f_min_offset
super().__init__(**kwargs)
def get_n_infill_objectives(self) -> int:
return self.problem.n_obj
def evaluate_f(self, f_predict: np.ndarray, f_var_predict: np.ndarray) -> np.ndarray:
return self._evaluate_f_poi(f_predict, f_var_predict, self.y_train[:, :f_predict.shape[1]], self.f_min_offset)
@classmethod
def _evaluate_f_poi(cls, f: np.ndarray, f_var: np.ndarray, f_current: np.ndarray, f_min_offset=0.) -> np.ndarray:
# Normalize current and predicted objectives
f_pareto = cls.get_pareto_front(f_current)
nadir_point, ideal_point = np.max(f_pareto, axis=0), np.min(f_pareto, axis=0)
nadir_point[nadir_point == ideal_point] = 1.
f_pareto_norm = (f_pareto-ideal_point)/(nadir_point-ideal_point)
f_norm, f_var_norm = cls._normalize_f_var(f, f_var, nadir_point, ideal_point)
# Get PoI for each point using closest point in the Pareto front
f_poi = np.empty(f.shape)
for i in range(f.shape[0]):
i_par_closest = np.argmin(np.sum((f_pareto_norm-f_norm[i, :])**2, axis=1))
f_par_targets = f_pareto_norm[i_par_closest, :]-f_min_offset
poi = cls._poi(f_par_targets, f_norm[i, :], f_var_norm[i, :])
f_poi[i, :] = 1.-poi
return f_poi
@staticmethod
def _normalize_f_var(f: np.ndarray, f_var: np.ndarray, nadir_point, ideal_point):
f_norm = (f-ideal_point)/(nadir_point-ideal_point)
f_var_norm = f_var/((nadir_point-ideal_point+1e-30)**2)
return f_norm, f_var_norm
@staticmethod
def _poi(f_targets: np.ndarray, f: np.ndarray, f_var: np.ndarray) -> np.ndarray:
return norm.cdf((f_targets-f) / np.sqrt(f_var))
class LowerConfidenceBoundInfill(ConstrainedInfill):
"""
The Lower Confidence Bound (LCB) represents the lowest expected value to be found at some point given its standard
deviation.
LCB(x) = y(x) - alpha * sqrt(s(x))
where
- y(x) the surrogate model estimate
- alpha is a scaling parameter (typical value is 2) --> lower means more exploitation, higher more exploration
- s(x) the surrogate model variance estimate
Implementation based on:
Cox, D., "A Statistical Method for Global Optimization", 1992, 10.1109/icsmc.1992.271617
"""
def __init__(self, alpha: float = 2., **kwargs):
self.alpha = alpha
super().__init__(**kwargs)
def get_n_infill_objectives(self) -> int:
return self.problem.n_obj
def evaluate_f(self, f_predict: np.ndarray, f_var_predict: np.ndarray) -> np.ndarray:
lcb = f_predict - self.alpha*np.sqrt(f_var_predict)
return lcb
class MinimumPoIInfill(ConstrainedInfill):
"""
The Minimum Probability of Improvement (MPoI) criterion is a multi-objective infill criterion and modifies the
calculation of the domination probability by only considering one objective dimension at a time. This should reduce
computational cost.
Optionally multiplies the MPoI criteria by its first integral moment, to transform it to an EI-like metric. Uses a
similar implementation as `EuclideanEIInfill`.
Implementation based on:
Rahat, A.A.M., "Alternative Infill Strategies for Expensive Multi-Objective Optimisation", 2017,
10.1145/3071178.3071276
Parr, J.M., "Improvement Criteria for Constraint Handling and Multiobjective Optimization", 2013
"""
def __init__(self, euclidean=False, **kwargs):
self.euclidean = euclidean
self.f_pareto = None
super().__init__(**kwargs)
def get_n_infill_objectives(self) -> int:
return 1
def set_samples(self, x_train: np.ndarray, is_active_train: np.ndarray, y_train: np.ndarray):
super().set_samples(x_train, is_active_train, y_train)
self.f_pareto = self.get_pareto_front(y_train[:, :self.problem.n_obj])
def evaluate_f(self, f_predict: np.ndarray, f_var_predict: np.ndarray) -> np.ndarray:
return self.get_mpoi_f(f_predict, f_var_predict, self.f_pareto, self.euclidean)
@classmethod
def get_mpoi_f(cls, f_predict: np.ndarray, f_var_predict: np.ndarray, f_pareto: np.ndarray, euclidean: bool) \
-> np.ndarray:
mpoi = np.empty((f_predict.shape[0], 1))
for i in range(f_predict.shape[0]):
mpoi[i, 0] = cls._mpoi(f_pareto, f_predict[i, :], f_var_predict[i, :], euclidean=euclidean)
mpoi[mpoi < 1e-6] = 0.
return 1.-mpoi
@classmethod
def _mpoi(cls, f_pareto: np.ndarray, f_predict: np.ndarray, var_predict: np.ndarray, euclidean: bool) -> float:
n, n_f = f_pareto.shape
# Probability of being dominated for each point in the Pareto front along each objective dimension
def cdf_not_better(f, f_pred, var_pred): # Rahat 2017, Eq. 11, 12
return ndtr((f_pred-f)/np.sqrt(var_pred))
p_is_dom_dim = np.empty((n, n_f))
for i_f in range(n_f):
p_is_dom_dim[:, i_f] = cdf_not_better(f_pareto[:, i_f], f_predict[i_f], var_predict[i_f])
# Probability of being dominated for each point along all dimensions: Rahat 2017, Eq. 10
p_is_dom = np.prod(p_is_dom_dim, axis=1)
# Probability of domination for each point: Rahat 2017, Eq. 13
p_dom = 1-p_is_dom
# Minimum probability of domination: Rahat 2017, Eq. 14
min_poi = np.min(p_dom)
# Multiply by distance to Pareto front if requested
if euclidean:
min_poi *= cls._get_euclidean_moment(min_poi, f_pareto, f_predict)
return min_poi
@classmethod
def _get_euclidean_moment(cls, p_dominate: float, f_pareto: np.ndarray, f_predict: np.ndarray) -> float:
# If the probability of domination is less than 50%, it means we are on the wrong side of the Pareto front
if p_dominate < .5:
return 0.
return np.min(np.sqrt(np.sum((f_predict-f_pareto) ** 2, axis=1))) # Parr Eq. 6.9
class EnsembleInfill(ConstrainedInfill):
"""
Infill strategy that optimize multiple underlying infill criteria simultaneously, thereby getting the best
compromise between what the different infills suggest.
More information and application:
Lyu, W. et al., 2018, July. Batch Bayesian optimization via multi-objective acquisition ensemble for automated
analog circuit design. In International conference on machine learning (pp. 3306-3314). PMLR.
Inspired by:
Cowen-Rivers, A.I. et al., 2022. HEBO: pushing the limits of sample-efficient hyper-parameter optimisation. Journal
of Artificial Intelligence Research, 74, pp.1269-1349.
"""
def __init__(self, infills: List[ConstrainedInfill] = None, constraint_strategy: ConstraintStrategy = None):
self.infills = infills
super().__init__(constraint_strategy=constraint_strategy)
def _initialize(self):
# Get set of default infills if none given
if self.infills is None:
if self.problem.n_obj == 1:
self.infills = [FunctionEstimateConstrainedInfill(), LowerConfidenceBoundInfill(),
ExpectedImprovementInfill(), ProbabilityOfImprovementInfill()]
else:
self.infills = [FunctionEstimateConstrainedInfill(), LowerConfidenceBoundInfill()]
# Reset the constraint handling strategies of the underlying infills and initialize them
for infill in self.infills:
if isinstance(infill, ConstrainedInfill):
infill.constraint_strategy = IgnoreConstraints()
infill.initialize(self.problem, self.surrogate_model, self.normalization)
super()._initialize()
def set_samples(self, x_train: np.ndarray, is_active_train: np.ndarray, y_train: np.ndarray):
super().set_samples(x_train, is_active_train, y_train)
for infill in self.infills:
infill.set_samples(x_train, is_active_train, y_train)
def get_n_infill_objectives(self) -> int:
return sum([infill.get_n_infill_objectives() for infill in self.infills])
def evaluate_f(self, f_predict: np.ndarray, f_var_predict: np.ndarray) -> np.ndarray:
# Merge underlying infill criteria
f_underlying = [infill.evaluate_f(f_predict, f_var_predict) for infill in self.infills]
return np.column_stack(f_underlying)
def select_infill_solutions(self, population: Population, infill_problem: Problem, n_infill) -> Population:
# Get the Pareto front
opt_pop = filter_optimum(population, least_infeasible=True)
# If we have less infills available than requested, return all
if len(opt_pop) <= n_infill:
return opt_pop
# If there are less infills than objectives requested, randomly select from the Pareto front
if n_infill <= self.n_f_ic:
i_select = np.random.choice(len(opt_pop), n_infill)
return opt_pop[i_select]
# Select by repeatedly eliminating crowded points from the Pareto front
for _ in range(len(opt_pop)-n_infill):
crowding_of_front = calc_crowding_distance(opt_pop.get('F'))
min_crowding = np.min(crowding_of_front)
i_min_crowding = np.where(crowding_of_front == min_crowding)[0]
i_remove = np.random.choice(i_min_crowding) if len(i_min_crowding) > 1 else i_min_crowding[0]
i_keep = np.ones((len(opt_pop),), dtype=bool)
i_keep[i_remove] = False
opt_pop = opt_pop[i_keep]
return opt_pop
class IgnoreConstraints(ConstraintStrategy):
def get_n_infill_constraints(self) -> int:
return 0
def evaluate(self, x: np.ndarray, g: np.ndarray, g_var: np.ndarray) -> np.ndarray:
return np.zeros((x.shape[0], 0))
class HCInfill(SurrogateInfill):
"""Infill that wraps another infill and modifies it for dealing with hidden constraints"""
def __init__(self, infill: SurrogateInfill, hc_strategy: HiddenConstraintStrategy):
self._infill = infill
self._hc_strategy = hc_strategy
super().__init__()
@property
def needs_variance(self):
return self._infill.needs_variance
def set_samples(self, x_train: np.ndarray, is_active_train: np.ndarray, y_train: np.ndarray):
self._infill.set_samples(x_train, is_active_train, y_train)
def predict(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
return self._infill.predict(x, is_active)
def _initialize(self):
self._infill.initialize(self.problem, self.surrogate_model, self.normalization)
def select_infill_solutions(self, population, infill_problem, n_infill):
return self._infill.select_infill_solutions(population, infill_problem, n_infill)
def reset_infill_log(self):
super().reset_infill_log()
self._infill.reset_infill_log()
def predict_variance(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
return self._infill.predict_variance(x, is_active)
def get_n_infill_objectives(self) -> int:
return self._infill.get_n_infill_objectives()
def get_n_infill_constraints(self) -> int:
n_constr = self._infill.get_n_infill_constraints()
if self._hc_strategy.adds_infill_constraint():
n_constr += 1
return n_constr
def _evaluate(self, x: np.ndarray, is_active: np.ndarray) -> Tuple[np.ndarray, Optional[np.ndarray]]:
f_infill, g_infill = self._infill.evaluate(x, is_active)
f_infill = self._hc_strategy.mod_infill_objectives(x, f_infill)
if self._hc_strategy.adds_infill_constraint():
g_hc = self._hc_strategy.evaluate_infill_constraint(x)
g_infill = np.column_stack([g_infill, g_hc]) if g_infill is not None else np.array([g_hc]).T
return f_infill, g_infill
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/arch_sbo/infill.py
| 0.885328 | 0.422803 |
infill.py
|
pypi
|
from pymoo.core.indicator import Indicator
from pymoo.indicators.hv import Hypervolume
from pymoo.util.display.column import Column
from pymoo.core.termination import TerminateIfAny
from pymoo.termination.max_gen import MaximumGenerationTermination
from sb_arch_opt.algo.pymoo_interface.metrics import *
__all__ = ['EstimatedPFDistance', 'get_sbo_termination', 'PFDistanceTermination', 'SBOMultiObjectiveOutput']
def get_sbo_termination(n_max_infill: int, tol=1e-3, n_filter=2):
return PFDistanceTermination(tol=tol, n_filter=n_filter, n_max_infill=n_max_infill)
class EstimatedPFDistance(Indicator):
"""Indicates the distance between the current Pareto front and the one estimated by the underlying model"""
def __init__(self):
super().__init__()
self.algorithm = None
def _do(self, f, *args, **kwargs):
if self.algorithm is None:
raise RuntimeError('Algorithm not set!')
from sb_arch_opt.algo.arch_sbo.algo import InfillAlgorithm, SBOInfill
if len(f) == 0:
return 1
if isinstance(self.algorithm, InfillAlgorithm) and isinstance(self.algorithm.infill_obj, SBOInfill):
sbo_infill = self.algorithm.infill_obj
pf_estimate = sbo_infill.get_pf_estimate()
if pf_estimate is None:
return 1
hv = Hypervolume(pf=pf_estimate)
hv_estimate = hv.do(pf_estimate)
hv_f = hv.do(f)
hv_dist = 1 - (hv_f / hv_estimate)
if hv_dist < 0:
hv_dist = 0
return hv_dist
return 0
class PFDistanceTermination(TerminateIfAny):
"""Termination criterion tracking the difference between the found and estimated Pareto fronts"""
def __init__(self, tol=1e-3, n_filter=2, n_max_infill=100):
self._pf_dist = EstimatedPFDistance()
termination = [
IndicatorDeltaToleranceTermination(SmoothedIndicator(self._pf_dist, n_filter=n_filter), tol),
MaximumGenerationTermination(n_max_gen=n_max_infill),
]
super().__init__(*termination)
def update(self, algorithm):
self._pf_dist.algorithm = algorithm
return super().update(algorithm)
class SBOMultiObjectiveOutput(EHVMultiObjectiveOutput):
"""Extended multi-objective output for use with SBO"""
def __init__(self):
super().__init__()
self.pf_dist_col = Column('pf_dist')
self.pf_dist = EstimatedPFDistance()
def initialize(self, algorithm):
super().initialize(algorithm)
self.pf_dist.algorithm = algorithm
self.columns += [self.pf_dist_col]
def update(self, algorithm):
super().update(algorithm)
f, feas = algorithm.opt.get("F", "feas")
f = f[feas]
self.pf_dist_col.set(self.pf_dist.do(f) if len(f) > 0 else None)
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/arch_sbo/metrics.py
| 0.790449 | 0.326432 |
metrics.py
|
pypi
|
import numpy as np
from typing import *
from sb_arch_opt.problem import *
from sb_arch_opt.algo.arch_sbo.models import *
from pymoo.util.normalization import Normalization, SimpleZeroToOneNormalization
try:
from smt.surrogate_models.surrogate_model import SurrogateModel
assert HAS_ARCH_SBO
except ImportError:
assert not HAS_ARCH_SBO
try:
from sklearn.ensemble import RandomForestClassifier as RFC
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
__all__ = ['get_hc_strategy', 'HiddenConstraintStrategy', 'PredictionHCStrategy', 'PredictorInterface',
'SKLearnClassifier', 'RandomForestClassifier', 'SMTPredictor', 'MDGPRegressor', 'HAS_SKLEARN',
'RejectionHCStrategy', 'ReplacementHCStrategyBase', 'GlobalWorstReplacement']
def get_hc_strategy():
"""
Get a hidden constraints strategy that works well for most problems.
"""
# Get the predictor: RF works best but requires scikit-learn
try:
predictor = RandomForestClassifier()
except ImportError:
predictor = MDGPRegressor()
# Create the strategy: use as additional constraint at Probability of Validity >= 50%
return PredictionHCStrategy(predictor, constraint=True, min_pov=.5)
class HiddenConstraintStrategy:
"""
Base class for implementing a strategy for dealing with hidden constraints.
"""
@staticmethod
def is_failed(y: np.ndarray):
return np.any(~np.isfinite(y), axis=1)
def initialize(self, problem: ArchOptProblemBase):
pass
def mod_xy_train(self, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Modify inputs and outputs for the surrogate model used for the main infill function"""
return x, y
def prepare_infill_search(self, x: np.ndarray, y: np.ndarray):
"""Prepare infill search given the (non-modified) normalized inputs and outputs"""
def adds_infill_constraint(self) -> bool:
"""Whether the strategy adds an inequality constraint to the infill search problem"""
return False
def evaluate_infill_constraint(self, x: np.ndarray) -> np.ndarray:
"""If the problem added an infill constraint, evaluate it here, returning an nx-length vector"""
def mod_infill_objectives(self, x: np.ndarray, f_infill: np.ndarray) -> np.ndarray:
"""Modify the infill objectives (in-place)"""
return f_infill
def __str__(self):
raise NotImplementedError
def __repr__(self):
raise NotImplementedError
class RejectionHCStrategy(HiddenConstraintStrategy):
"""Strategy that simply rejects failed points before training the model"""
def mod_xy_train(self, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
# Remove failed points from the training set
is_not_failed = ~self.is_failed(y)
return x[is_not_failed, :], y[is_not_failed, :]
def __str__(self):
return 'Rejection'
def __repr__(self):
return f'{self.__class__.__name__}()'
class ReplacementHCStrategyBase(HiddenConstraintStrategy):
"""Base class for a strategy that replaces failed outputs by some value"""
def __init__(self):
self._normalization: Optional[SimpleZeroToOneNormalization] = None
super().__init__()
def initialize(self, problem: ArchOptProblemBase):
self._normalization = SimpleZeroToOneNormalization(xl=problem.xl, xu=problem.xu)
def mod_xy_train(self, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
# Separate into failed and valid (non-failed) set
is_failed = self.is_failed(y)
x_valid = x[~is_failed, :]
y_valid = y[~is_failed, :]
x_failed = x[is_failed, :]
y_failed = y[is_failed, :]
# If there are no failed points, no need to replace
if x_failed.shape[0] == 0:
return x, y
# If there are no valid points, replace with 1
if y_valid.shape[0] == 0:
y_failed_replace = np.ones(y_failed.shape)
else:
y_failed_replace = self._replace_y(x_failed, y_failed, x_valid, y_valid)
# Replace values
y = y.copy()
y[is_failed, :] = y_failed_replace
return x, y
def _replace_y(self, x_failed: np.ndarray, y_failed: np.ndarray, x_valid: np.ndarray, y_valid: np.ndarray) \
-> np.ndarray:
"""Return values for replacing y_failed (x values are normalized)"""
raise NotImplementedError
def get_replacement_strategy_name(self) -> str:
raise NotImplementedError
def __str__(self):
return f'Replacement: {self.get_replacement_strategy_name()}'
def __repr__(self):
return f'{self.__class__.__name__}()'
class GlobalWorstReplacement(ReplacementHCStrategyBase):
"""Replace failed values with the worst values known for these outputs"""
def _replace_y(self, x_failed: np.ndarray, y_failed: np.ndarray, x_valid: np.ndarray, y_valid: np.ndarray) \
-> np.ndarray:
# Get global worst values
y_worst = np.max(y_valid, axis=0)
# Replace
y_replace = np.zeros(y_failed.shape)+y_worst
return y_replace
def get_replacement_strategy_name(self) -> str:
return 'Global Worst'
class PredictorInterface:
"""Interface class for some validity predictor"""
_training_doe = {}
_reset_pickle_keys = []
def __init__(self):
self.training_set = None
self._normalization: Optional[Normalization] = None
self._trained_single_class = None
def __getstate__(self):
state = self.__dict__.copy()
for key in self._reset_pickle_keys:
if key in state:
state[key] = None
return state
def initialize(self, problem: ArchOptProblemBase):
self._normalization = self._get_normalization(problem)
self._initialize(problem)
def _get_normalization(self, problem: ArchOptProblemBase) -> Normalization:
return SimpleZeroToOneNormalization(xl=problem.xl, xu=problem.xu, estimate_bounds=False)
def _initialize(self, problem: ArchOptProblemBase):
pass
def train(self, x: np.ndarray, y_is_valid: np.ndarray):
# Check if we are training a classifier with only 1 class
self._trained_single_class = single_class = y_is_valid[0] if len(set(y_is_valid)) == 1 else None
if single_class is None:
self._train(x, y_is_valid)
def evaluate_probability_of_validity(self, x: np.ndarray) -> np.ndarray:
if self._trained_single_class is not None:
return np.ones((x.shape[0],))*self._trained_single_class
return self._evaluate_probability_of_validity(x)
def _train(self, x: np.ndarray, y_is_valid: np.ndarray):
"""Train the model (x's are not normalized), y_is_valid is a vector"""
raise NotImplementedError
def _evaluate_probability_of_validity(self, x: np.ndarray) -> np.ndarray:
"""Get the probability of validity (0 to 1) at nx points (x is not normalized); should return a vector!"""
raise NotImplementedError
def __str__(self):
raise NotImplementedError
def __repr__(self):
return f'{self.__class__.__name__}()'
class PredictionHCStrategy(HiddenConstraintStrategy):
"""Base class for a strategy that predictions where failed regions occur"""
def __init__(self, predictor: PredictorInterface, constraint=True, min_pov=.5):
check_dependencies()
self.predictor = predictor
self.constraint = constraint
self.min_pov = min_pov
super().__init__()
def initialize(self, problem: ArchOptProblemBase):
self.predictor.initialize(problem)
def mod_xy_train(self, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
# Remove failed points form the training set
is_not_failed = ~self.is_failed(y)
return x[is_not_failed, :], y[is_not_failed, :]
def prepare_infill_search(self, x: np.ndarray, y: np.ndarray):
is_failed = self.is_failed(y)
y_is_valid = (~is_failed).astype(float)
self.predictor.train(x, y_is_valid)
self.predictor.training_set = (x, y_is_valid)
def adds_infill_constraint(self) -> bool:
return self.constraint
def evaluate_infill_constraint(self, x: np.ndarray) -> np.ndarray:
pov = self.predictor.evaluate_probability_of_validity(x)
pov = np.clip(pov, 0, 1)
return self.min_pov-pov
def mod_infill_objectives(self, x: np.ndarray, f_infill: np.ndarray) -> np.ndarray:
pov = self.predictor.evaluate_probability_of_validity(x)
pov = np.clip(pov, 0, 1)
# The infill objectives are a minimization of some value between 0 and 1:
# - The function-based infills (prediction mean), the underlying surrogates are trained on normalized y values
# - The expected improvement is normalized between 0 and 1, where 1 corresponds to no expected improvement
return 1-((1-f_infill).T*pov).T
def __str__(self):
type_str = 'G' if self.constraint else 'F'
type_str += f' min_pov={self.min_pov}' if self.constraint and self.min_pov != .5 else ''
return f'Prediction {type_str}: {self.predictor!s}'
def __repr__(self):
min_pov_str = f', min_pov={self.min_pov}' if self.constraint else ''
return f'{self.__class__.__name__}({self.predictor!r}, constraint={self.constraint}{min_pov_str})'
class SKLearnClassifier(PredictorInterface):
_reset_pickle_keys = ['_predictor']
def __init__(self):
self._predictor = None
super().__init__()
def _evaluate_probability_of_validity(self, x: np.ndarray) -> np.ndarray:
x_norm = self._normalization.forward(x)
pov = self._predictor.predict_proba(x_norm)[:, 1] # Probability of belonging to class 1 (valid points)
return pov[:, 0] if len(pov.shape) == 2 else pov
def _train(self, x: np.ndarray, y_is_valid: np.ndarray):
self._do_train(self._normalization.forward(x), y_is_valid)
def _do_train(self, x_norm: np.ndarray, y_is_valid: np.ndarray):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class RandomForestClassifier(SKLearnClassifier):
def __init__(self, n: int = 100):
if not HAS_SKLEARN:
raise ImportError(f'ArchSBO dependencies not installed: pip install sb-arch-opt[arch_sbo]')
self.n = n
super().__init__()
def _do_train(self, x_norm: np.ndarray, y_is_valid: np.ndarray):
self._predictor = clf = RFC(n_estimators=self.n)
clf.fit(x_norm, y_is_valid)
def __str__(self):
return f'Random Forest Classifier ({self.n})'
def __repr__(self):
return f'{self.__class__.__name__}(n={self.n})'
class SMTPredictor(PredictorInterface):
_reset_pickle_keys = ['_model']
def __init__(self):
self._model: Optional['SurrogateModel'] = None
super().__init__()
def _evaluate_probability_of_validity(self, x: np.ndarray) -> np.ndarray:
return self._model.predict_values(self._normalization.forward(x))[:, 0]
def _train(self, x: np.ndarray, y_is_valid: np.ndarray):
self._do_train(self._normalization.forward(x), np.array([y_is_valid]).T)
def _do_train(self, x_norm: np.ndarray, y_is_valid: np.ndarray):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class MDGPRegressor(SMTPredictor):
"""Uses SMT's mixed-discrete Kriging regressor"""
def __init__(self):
self._problem = None
super().__init__()
def _get_normalization(self, problem: ArchOptProblemBase) -> Normalization:
return ModelFactory(problem).get_md_normalization()
def _initialize(self, problem: ArchOptProblemBase):
self._problem = problem
def _do_train(self, x_norm: np.ndarray, y_is_valid: np.ndarray):
model, _ = ModelFactory(self._problem).get_md_kriging_model(corr='abs_exp', theta0=[1e-2], n_start=5)
self._model = model
model.set_training_values(x_norm, y_is_valid)
model.train()
def __str__(self):
return 'MD-GP'
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/arch_sbo/hc_strategy.py
| 0.900086 | 0.533276 |
hc_strategy.py
|
pypi
|
from sb_arch_opt.problem import *
from sb_arch_opt.algo.arch_sbo.models import *
from sb_arch_opt.algo.arch_sbo.algo import *
from sb_arch_opt.algo.arch_sbo.infill import *
from sb_arch_opt.algo.arch_sbo.metrics import *
from sb_arch_opt.algo.arch_sbo.hc_strategy import *
if not HAS_ARCH_SBO:
get_sbo_termination = lambda *_, **__: None
__all__ = ['get_arch_sbo_rbf', 'get_arch_sbo_gp', 'HAS_ARCH_SBO', 'get_sbo_termination', 'get_sbo']
def get_arch_sbo_rbf(init_size: int = 100, results_folder=None, **kwargs) -> InfillAlgorithm:
"""
Get a architecture SBO algorithm using an RBF model as its surrogate model.
"""
model = ModelFactory.get_rbf_model()
hc_strategy = get_hc_strategy()
return get_sbo(model, FunctionEstimateInfill(), init_size=init_size, results_folder=results_folder,
hc_strategy=hc_strategy, **kwargs)
def get_arch_sbo_gp(problem: ArchOptProblemBase, init_size: int = 100, n_parallel=None, min_pof: float = None,
kpls_n_dim: int = 10, g_aggregation: ConstraintAggregation = None, results_folder=None, **kwargs) \
-> InfillAlgorithm:
"""
Get an architecture SBO algorithm using a mixed-discrete Gaussian Process (Kriging) model as its surrogate model.
Appropriate (multi-objective) infills and constraint handling techniques are automatically selected.
For constraint handling, increase min_pof to between 0.50 and 0.75 to be more conservative (i.e. require a higher
probability of being feasible for infill points) or decrease below 0.50 to be more exploratory. Optionally defined
an aggregation strategy to reduce the number of models to train.
To reduce model training times for high-dimensional problems, KPLS is used instead of Kriging when the problem
dimension exceeds kpls_n_dim. Note that the DoE should then contain at least kpls_n_dim+1 points.
"""
# Create the mixed-discrete Kriging model, correctly configured for the given design space
kpls_n_comp = kpls_n_dim if kpls_n_dim is not None and problem.n_var > kpls_n_dim else None
model, normalization = ModelFactory(problem).get_md_kriging_model(kpls_n_comp=kpls_n_comp)
# Select the single- or multi-objective infill criterion, including constraint handling strategy
infill, infill_batch = get_default_infill(
problem, n_parallel=n_parallel, min_pof=min_pof, g_aggregation=g_aggregation)
# Get default hidden constraint strategy
hc_strategy = get_hc_strategy()
return get_sbo(model, infill, infill_size=infill_batch, init_size=init_size, normalization=normalization,
results_folder=results_folder, hc_strategy=hc_strategy, **kwargs)
def get_sbo(surrogate_model, infill: 'SurrogateInfill', infill_size: int = 1, init_size: int = 100,
infill_pop_size: int = 100, infill_gens: int = 100, repair=None, normalization=None,
hc_strategy: 'HiddenConstraintStrategy' = None, results_folder=None, **kwargs) -> InfillAlgorithm:
"""Create the SBO algorithm given some SMT surrogate model and an infill criterion"""
sbo = SBOInfill(surrogate_model, infill, pop_size=infill_pop_size, termination=infill_gens, repair=repair,
normalization=normalization, hc_strategy=hc_strategy, verbose=True)\
.algorithm(infill_size=infill_size, init_size=init_size, **kwargs)
if results_folder is not None:
sbo.store_intermediate_results(results_folder=results_folder)
return sbo
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/arch_sbo/api.py
| 0.855187 | 0.263771 |
api.py
|
pypi
|
import os
import copy
import numpy as np
from typing import *
from dataclasses import dataclass
import pymoo.core.variable as var
from pymoo.core.problem import Problem
from sb_arch_opt.problem import ArchOptProblemBase
from sb_arch_opt.design_space import ArchDesignSpace
from sb_arch_opt.sampling import HierarchicalSampling
from pymoo.util.normalization import Normalization, SimpleZeroToOneNormalization
try:
os.environ['USE_NUMBA_JIT'] = '1'
from smt.surrogate_models.rbf import RBF
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.surrogate_models.krg import KRG, KrgBased
from smt.surrogate_models.kpls import KPLS
from smt.surrogate_models.krg_based import MixIntKernelType, MixHrcKernelType
from smt.utils.design_space import BaseDesignSpace
import smt.utils.design_space as ds
HAS_ARCH_SBO = True
except ImportError:
HAS_ARCH_SBO = False
class BaseDesignSpace:
pass
class SurrogateModel:
pass
__all__ = ['check_dependencies', 'HAS_ARCH_SBO', 'ModelFactory', 'MixedDiscreteNormalization', 'SBArchOptDesignSpace',
'MultiSurrogateModel']
def check_dependencies():
if not HAS_ARCH_SBO:
raise ImportError(f'ArchSBO dependencies not installed: pip install sb-arch-opt[arch_sbo]')
@dataclass
class SMTDesignSpaceSpec:
var_defs: List[dict] # [{'name': name, 'lb': lb, 'ub', ub}, ...]
design_space: 'SBArchOptDesignSpace'
is_mixed_discrete: bool
class MixedDiscreteNormalization(Normalization):
"""Normalizes continuous variables to [0, 1], moves integer variables to start at 0"""
def __init__(self, design_space: ArchDesignSpace):
self._design_space = design_space
self._is_cont_mask = design_space.is_cont_mask
self._is_int_mask = design_space.is_int_mask
super().__init__()
def forward(self, x):
x_norm = x.copy()
xl, xu = self._design_space.xl, self._design_space.xu
norm = xu - xl
norm[norm == 0] = 1e-32
cont_mask = self._is_cont_mask
x_norm[:, cont_mask] = (x[:, cont_mask] - xl[cont_mask]) / norm[cont_mask]
int_mask = self._is_int_mask
x_norm[:, int_mask] = x[:, int_mask] - xl[int_mask]
return x_norm
def backward(self, x):
x_abs = x.copy()
xl, xu = self._design_space.xl, self._design_space.xu
cont_mask = self._is_cont_mask
x_abs[:, cont_mask] = x[:, cont_mask]*(xu[cont_mask]-xl[cont_mask]) + xl[cont_mask]
int_mask = self._is_int_mask
x_abs[:, int_mask] = x[:, int_mask] + xl[int_mask]
return x_abs
class ModelFactory:
def __init__(self, problem: ArchOptProblemBase):
self.problem = problem
def get_smt_design_space_spec(self) -> SMTDesignSpaceSpec:
"""Get information about the design space as needed by SMT and SEGOMOE"""
check_dependencies()
return self.create_smt_design_space_spec(self.problem.design_space)
@staticmethod
def create_smt_design_space_spec(arch_design_space: ArchDesignSpace, md_normalize=False, cont_relax=False):
check_dependencies()
design_space = SBArchOptDesignSpace(arch_design_space, md_normalize=md_normalize, cont_relax=cont_relax)
is_mixed_discrete = not np.all(arch_design_space.is_cont_mask)
var_defs = [{'name': f'x{i}', 'lb': bounds[0], 'ub': bounds[1]}
for i, bounds in enumerate(design_space.get_num_bounds())]
return SMTDesignSpaceSpec(
var_defs=var_defs,
design_space=design_space,
is_mixed_discrete=is_mixed_discrete,
)
@staticmethod
def get_continuous_normalization(problem: Problem):
return SimpleZeroToOneNormalization(xl=problem.xl, xu=problem.xu, estimate_bounds=False)
def get_md_normalization(self):
return MixedDiscreteNormalization(self.problem.design_space)
@staticmethod
def get_rbf_model():
check_dependencies()
return RBF(print_global=False, d0=1., poly_degree=-1, reg=1e-10)
@staticmethod
def get_kriging_model(multi=True, kpls_n_comp: int = None, **kwargs):
check_dependencies()
if kpls_n_comp is not None:
surrogate = KPLS(print_global=False, n_comp=kpls_n_comp, **kwargs)
else:
surrogate = KRG(print_global=False, **kwargs)
if multi:
surrogate = MultiSurrogateModel(surrogate)
return surrogate
def get_md_kriging_model(self, kpls_n_comp: int = None, multi=True, **kwargs_) -> Tuple['SurrogateModel', Normalization]:
check_dependencies()
normalization = self.get_md_normalization()
norm_ds_spec = self.create_smt_design_space_spec(self.problem.design_space, md_normalize=True)
kwargs = dict(
print_global=False,
design_space=norm_ds_spec.design_space,
categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
hierarchical_kernel=MixHrcKernelType.ALG_KERNEL,
)
if norm_ds_spec.is_mixed_discrete:
kwargs['n_start'] = kwargs.get('n_start', 5)
kwargs.update(kwargs_)
if kpls_n_comp is not None:
cr_ds_spec = self.create_smt_design_space_spec(
self.problem.design_space, md_normalize=True, cont_relax=True)
kwargs['design_space'] = cr_ds_spec.design_space
surrogate = KPLS(n_comp=kpls_n_comp, **kwargs)
else:
surrogate = KRG(**kwargs)
if multi:
surrogate = MultiSurrogateModel(surrogate)
return surrogate, normalization
class SBArchOptDesignSpace(BaseDesignSpace):
"""SMT design space implementation using SBArchOpt's design space logic"""
_global_disable_hierarchical_cat_fix = False
def __init__(self, arch_design_space: ArchDesignSpace, md_normalize=False, cont_relax=False):
self._ds = arch_design_space
self.normalize = MixedDiscreteNormalization(arch_design_space) if md_normalize else None
self._cont_relax = cont_relax
super().__init__()
@property
def arch_design_space(self) -> ArchDesignSpace:
return self._ds
def _get_design_variables(self) -> List['ds.DesignVariable']:
"""Return the design variables defined in this design space if not provided upon initialization of the class"""
smt_des_vars = []
is_conditional = self._ds.is_conditionally_active
normalize = self.normalize is not None
cont_relax = self._cont_relax
for i, dv in enumerate(self._ds.des_vars):
if isinstance(dv, var.Real):
bounds = (0, 1) if normalize else dv.bounds
smt_des_vars.append(ds.FloatVariable(bounds[0], bounds[1]))
elif isinstance(dv, var.Integer):
bounds = (0, dv.bounds[1]-dv.bounds[0]) if normalize else dv.bounds
if cont_relax:
smt_des_vars.append(ds.FloatVariable(bounds[0], bounds[1]))
else:
smt_des_vars.append(ds.IntegerVariable(bounds[0], bounds[1]))
elif isinstance(dv, var.Binary):
if cont_relax:
smt_des_vars.append(ds.FloatVariable(0, 1))
else:
smt_des_vars.append(ds.OrdinalVariable(values=[0, 1]))
elif isinstance(dv, var.Choice):
if cont_relax:
smt_des_vars.append(ds.FloatVariable(0, len(dv.options)-1))
else:
# Conditional categorical variables are currently not supported
if is_conditional[i] and not self._global_disable_hierarchical_cat_fix:
smt_des_vars.append(ds.IntegerVariable(0, len(dv.options)-1))
else:
smt_des_vars.append(ds.CategoricalVariable(values=dv.options))
else:
raise ValueError(f'Unexpected variable type: {dv!r}')
return smt_des_vars
def _is_conditionally_acting(self) -> np.ndarray:
return self._ds.is_conditionally_active
def _correct_get_acting(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
if self.normalize is not None:
x = self.normalize.backward(x)
x, is_active = self._ds.correct_x(x)
if self.normalize is not None:
x = self.normalize.forward(x)
return x, is_active
def _sample_valid_x(self, n: int) -> Tuple[np.ndarray, np.ndarray]:
sampler = HierarchicalSampling()
stub_problem = ArchOptProblemBase(self._ds)
x, is_active = sampler.sample_get_x(stub_problem, n)
if self.normalize is not None:
x = self.normalize.forward(x)
return x, is_active
def __str__(self):
return 'SBArchOpt Design Space'
def __repr__(self):
return f'{self.__class__.__name__}({self._ds!r})'
class MultiSurrogateModel(SurrogateModel):
"""SMT surrogate model wrapper that trains independent models for each provided output"""
def __init__(self, surrogate: 'SurrogateModel', **kwargs):
super().__init__(**kwargs)
self._surrogate = surrogate
self._is_krg = isinstance(surrogate, KrgBased)
self._models: List['SurrogateModel'] = []
self.supports = self._surrogate.supports
self.options["print_global"] = False
@property
def name(self):
return f'Multi{self._surrogate.name}'
def _initialize(self):
self.supports["derivatives"] = False
def set_training_values(self, xt: np.ndarray, yt: np.ndarray, name=None, is_acting=None) -> None:
self._models = models = []
for iy in range(yt.shape[1]):
model: Union['KrgBased', 'SurrogateModel'] = copy.deepcopy(self._surrogate)
if self._is_krg:
model.set_training_values(xt, yt[:, [iy]], is_acting=is_acting)
else:
model.set_training_values(xt, yt[:, [iy]])
models.append(model)
def train(self) -> None:
theta0 = None
for i, model in enumerate(self._models):
if i > 0 and isinstance(model, KrgBased) and theta0 is not None:
model.options['theta0'] = theta0
model.train()
if i == 0 and isinstance(model, KrgBased):
try:
theta0 = list(model.optimal_theta)
except AttributeError:
pass
def predict_values(self, x: np.ndarray, is_acting=None) -> np.ndarray:
model: Union['SurrogateModel', 'KrgBased']
if self._is_krg:
values = [model.predict_values(x, is_acting=is_acting) for model in self._models]
else:
values = [model.predict_values(x) for model in self._models]
return np.column_stack(values)
def predict_variances(self, x: np.ndarray, is_acting=None) -> np.ndarray:
model: Union['SurrogateModel', 'KrgBased']
if self._is_krg:
values = [model.predict_variances(x, is_acting=is_acting) for model in self._models]
else:
values = [model.predict_variances(x) for model in self._models]
return np.column_stack(values)
def _predict_values(self, x: np.ndarray, is_acting=None) -> np.ndarray:
raise RuntimeError
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/arch_sbo/models.py
| 0.82485 | 0.307611 |
models.py
|
pypi
|
import logging
import numpy as np
from pymoo.core.population import Population
from sb_arch_opt.problem import ArchOptProblemBase
try:
from smarty.problem.optimizationProblem import CustomOptProb
from smarty.optimize.sbo import SBO
from smarty.optimize import convergenceCriteria as CC
from smarty import Log
HAS_SMARTY = True
except ImportError:
HAS_SMARTY = False
__all__ = ['HAS_SMARTY', 'check_dependencies', 'SMARTyArchOptInterface']
log = logging.getLogger('sb_arch_opt.smarty')
def check_dependencies():
if not HAS_SMARTY:
raise ImportError(f'SMARTy not installed!')
class SMARTyArchOptInterface:
"""
Interface class to SMARTy SBO.
"""
def __init__(self, problem: ArchOptProblemBase, n_init: int, n_infill: int):
check_dependencies()
Log.SetLogLevel(1)
self._problem = problem
self._n_init = n_init
self._n_infill = n_infill
self._has_g = problem.n_ieq_constr > 0
self._opt_prob = None
self._optimizer = None
@property
def problem(self):
return self._problem
@property
def opt_prob(self):
if self._opt_prob is None:
bounds = np.column_stack([self._problem.xl, self._problem.xu])
problem_structure = {'objFuncs': {f'f{i}': 'F' for i in range(self._problem.n_obj)}}
if self._has_g:
problem_structure['constrFuncs'] = {f'g{i}': 'F' for i in range(self._problem.n_ieq_constr)}
self._opt_prob = CustomOptProb(bounds=bounds, problemStructure=problem_structure,
customFunctionHandler=self._evaluate, vectorized=True,
problemName=repr(self._problem))
return self._opt_prob
@property
def optimizer(self) -> 'SBO':
if self._optimizer is None:
self._optimizer = sbo = SBO(self.opt_prob)
for key, settings in sbo._settingsDOE.items():
settings['nSamples'] = self._n_init
return self._optimizer
def _evaluate(self, x, _):
out = self._problem.evaluate(x, return_as_dictionary=True)
outputs = {}
for i in range(self._problem.n_obj):
outputs[f'objFuncs/f{i}/F'] = out['F'][:, i]
for i in range(self._problem.n_ieq_constr):
outputs[f'constrFuncs/g{i}/F'] = out['G'][:, i]
return outputs
@property
def pop(self) -> Population:
f, g, idx = self.opt_prob.CreateObjAndConstrMatrices()
x = self.opt_prob.inputMatrix[idx]
kwargs = {'X': x, 'F': f}
if self._problem.n_ieq_constr > 0:
kwargs['G'] = g
return Population.new(**kwargs)
def _get_infill(self):
if self._problem.n_obj == 1:
return 'EI'
elif self._problem.n_obj == 2:
return 'EHVI2D'
return 'WFGEHVI'
def _get_convergence(self):
if self._problem.n_obj == 1:
return [
CC.AbsOptXChange(1e-8, 5),
CC.MinInfillValue(1e-6, 4),
]
return [
CC.StallIterations(5),
]
def optimize(self):
"""Run the optimization loop for n_infill infill points (on top on the initialization points)"""
optimizer = self.optimizer
optimizer.Optimize(
nMaxIters=self._n_infill,
listConvCrit=self._get_convergence(),
infillMethod=self._get_infill(),
)
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/smarty_interface/algo.py
| 0.820037 | 0.168139 |
algo.py
|
pypi
|
import logging
import numpy as np
from typing import Optional
from sb_arch_opt.problem import *
from sb_arch_opt.util import capture_log
from sb_arch_opt.algo.pymoo_interface.api import ResultsStorageCallback, ArchOptEvaluator
from sb_arch_opt.algo.pymoo_interface.storage_restart import initialize_from_previous_results
from ConfigSpace import ConfigurationSpace, Float, Integer, Categorical
import pymoo.core.variable as var
from pymoo.core.algorithm import Algorithm
from pymoo.core.population import Population
from pymoo.util.optimum import filter_optimum
from pymoo.core.initialization import Initialization
from pymoo.util.display.single import SingleObjectiveOutput
try:
from tpe.optimizer import TPEOptimizer
HAS_TPE = True
except ImportError:
HAS_TPE = False
__all__ = ['HAS_TPE', 'ArchTPEInterface', 'TPEAlgorithm', 'initialize_from_previous_results']
log = logging.getLogger('sb_arch_opt.tpe')
def check_dependencies():
if not HAS_TPE:
raise RuntimeError(f'TPE dependencies not installed: pip install -e .[tpe]')
class ArchTPEInterface:
"""
Class for interfacing the Tree-structured Parzen Estimator (TPE) optimization algorithm. For more info, see:
Bergstra et al., "Algorithms for Hyper-Parameter Optimization", 2011, available at:
https://papers.nips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf
Currently only supports single-objective unconstrained problems.
"""
def __init__(self, problem: ArchOptProblemBase):
check_dependencies()
capture_log()
if problem.n_obj != 1:
raise ValueError(f'Currently only single-objective problems are supported!')
if problem.n_ieq_constr != 0 or problem.n_eq_constr != 0:
raise ValueError(f'Currently only unconstrained problems are supported!')
self._problem = problem
self._optimizer: Optional['TPEOptimizer'] = None
def initialize(self):
self._optimizer = self._get_optimizer()
def ask_init(self):
if self._optimizer is None:
self.initialize()
return self._convert_to_x(self._optimizer.initial_sample())
def ask(self):
if self._optimizer is None:
self.initialize()
return self._convert_to_x(self._optimizer.sample())
def _convert_to_x(self, config):
is_cat_mask = self._problem.is_cat_mask
x = []
for ix in range(self._problem.n_var):
key = f'x{ix}'
x.append(int(config[key]) if is_cat_mask[ix] else config[key])
return np.array([x])
def tell(self, x: np.ndarray, f: float):
assert x.shape == (self._problem.n_var,)
assert self._optimizer is not None
out_config = {}
is_cat_mask = self._problem.is_cat_mask
for ix in range(self._problem.n_var):
key = f'x{ix}'
out_config[key] = str(int(x[ix])) if is_cat_mask[ix] else x[ix]
# Report outputs
results = {'f': f}
self._optimizer.update(out_config, results, runtime=0.)
def optimize(self, n_init: int, n_infill: int):
self.initialize()
x_results, f_results = [], []
for i_iter in range(n_init+n_infill):
is_init = i_iter < n_init
log.info(f'Iteration {i_iter+1}/{n_init+n_infill} ({"init" if is_init else "infill"})')
# Get next point to evaluate
x_eval = self.ask_init() if is_init else self.ask()
# Evaluate
out = self._problem.evaluate(x_eval, return_as_dictionary=True)
x_out = out['X'][0, :]
f = out['F'][0, 0]
self.tell(x_out, f)
log.info(f'Evaluated: {f:.3g} @ {x_out}')
x_results.append(x_out)
f_results.append(f)
x_results, f_results = np.array(x_results), np.array(f_results)
return x_results, f_results
def _get_optimizer(self):
return TPEOptimizer(
obj_func=lambda *args, **kwargs: None, # We're using the ask-tell interface
config_space=self._get_config_space(),
metric_name='f',
result_keys=['f'],
)
def _get_config_space(self):
params = {}
for i, dv in enumerate(self._problem.des_vars):
name = f'x{i}'
if isinstance(dv, var.Real):
params[name] = Float(name, bounds=dv.bounds)
elif isinstance(dv, var.Integer):
params[name] = Integer(name, bounds=dv.bounds)
elif isinstance(dv, var.Binary):
params[name] = Integer(name, bounds=(0, 1))
elif isinstance(dv, var.Choice):
params[name] = Categorical(name, items=[str(i) for i in range(len(dv.options))])
else:
raise ValueError(f'Unknown variable type: {dv!r}')
return ConfigurationSpace(space=params)
class TPEInitialization(Initialization):
def __init__(self):
self.interface: Optional[ArchTPEInterface] = None
super().__init__(sampling=None)
def do(self, problem, n_samples, **kwargs):
x_init = np.row_stack([self.interface.ask_init() for _ in range(n_samples)])
return Population.new(X=x_init)
class TPEAlgorithm(Algorithm):
"""
The Tree-structured Parzen Estimator (TPE) optimization algorithm implemented as a pymoo Algorithm.
Note that through pymoo itself you can also access Optuna's TPE algorithm, however that one does not support design
space hierarchy like SBArchOpt supports it.
"""
def __init__(self, n_init: int, results_folder=None, output=SingleObjectiveOutput(), **kwargs):
self._interface: Optional[ArchTPEInterface] = None
self.n_init = n_init
self.initialization = TPEInitialization()
evaluator = ArchOptEvaluator(results_folder=results_folder)
callback = ResultsStorageCallback(results_folder) if results_folder is not None else None
super().__init__(evaluator=evaluator, callback=callback, output=output, **kwargs)
def _setup(self, problem, **kwargs):
if not isinstance(problem, ArchOptProblemBase):
raise RuntimeError(f'The TPE algorithm only works with SBArchOpt problem definitions!')
self._interface = interface = ArchTPEInterface(problem)
interface.initialize()
if isinstance(self.initialization, TPEInitialization):
self.initialization.interface = self._interface
def _initialize_infill(self):
return self.initialization.do(self.problem, self.n_init)
def _infill(self):
return Population.new(X=self._interface.ask())
def _initialize_advance(self, infills=None, **kwargs):
self._advance(infills, is_init=True, **kwargs)
def _advance(self, infills=None, is_init=False, **kwargs):
if not is_init:
self.pop = Population.merge(self.pop, infills)
x, f = infills.get('X'), infills.get('F')
for i in range(len(infills)):
self._interface.tell(x[i, :], f[i, 0])
def _set_optimum(self):
pop = self.pop
if self.opt is not None:
pop = Population.merge(self.opt, pop)
self.opt = filter_optimum(pop, least_infeasible=True)
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/tpe_interface/api.py
| 0.870239 | 0.301748 |
api.py
|
pypi
|
import math
import numpy as np
from pymoo.core.individual import Individual
from pymoo.core.infill import InfillCriterion
from pymoo.core.population import Population
from pymoo.core.problem import Problem
from pymoo.core.variable import Choice, Real, Integer, Binary
from pymoo.operators.crossover.sbx import SBX
from pymoo.operators.crossover.ux import UX
from pymoo.operators.mutation.bitflip import BFM
from pymoo.operators.mutation.pm import PM
from pymoo.operators.mutation.rm import ChoiceRandomMutation
from pymoo.operators.repair.rounding import RoundingRepair
from pymoo.operators.selection.rnd import RandomSelection
__all__ = ['MixedDiscreteMating']
class MixedDiscreteMating(InfillCriterion):
"""SBArchOpt implementation of mixed-discrete mating (crossover and mutation) operations. Similar functionality as
`pymoo.core.mixed.MixedVariableMating`, however keeps x as a matrix."""
def __init__(self,
selection=RandomSelection(),
crossover=None,
mutation=None,
repair=None,
eliminate_duplicates=True,
n_max_iterations=100,
**kwargs):
super().__init__(repair, eliminate_duplicates, n_max_iterations, **kwargs)
if crossover is None:
crossover = {
Binary: UX(),
Real: SBX(),
Integer: SBX(vtype=float, repair=RoundingRepair()),
Choice: UX(),
}
if mutation is None:
mutation = {
Binary: BFM(),
Real: PM(),
Integer: PM(vtype=float, repair=RoundingRepair()),
Choice: ChoiceRandomMutation(),
}
self.selection = selection
self.crossover = crossover
self.mutation = mutation
def _do(self, problem, pop, n_offsprings, parents=False, **kwargs):
# So far we assume all crossover need the same amount of parents and create the same number of offsprings
n_parents_crossover = 2
n_offspring_crossover = 2
# the variables with the concrete information
var_defs = problem.vars
# group all the variables by their types
vars_by_type = {}
for ik, (k, v) in enumerate(var_defs.items()):
clazz = type(v)
if clazz not in vars_by_type:
vars_by_type[clazz] = []
vars_by_type[clazz].append((ik, k))
# # all different recombinations (the choices need to be split because of data types)
recomb = []
for clazz, list_of_vars in vars_by_type.items():
if clazz == Choice:
for idx, var_name in list_of_vars:
recomb.append((clazz, [var_name], np.array([idx])))
else:
idx, var_names = zip(*list_of_vars)
recomb.append((clazz, var_names, np.array(idx)))
# create an empty population that will be set in each iteration
x_out = np.empty((n_offsprings, len(var_defs)))
if not parents:
n_select = math.ceil(n_offsprings / n_offspring_crossover)
pop = self.selection(problem, pop, n_select, n_parents_crossover, **kwargs)
for clazz, list_of_vars, x_idx in recomb:
crossover = self.crossover[clazz]
assert crossover.n_parents == n_parents_crossover and crossover.n_offsprings == n_offspring_crossover
_parents = [[Individual(X=parent.X[x_idx]) for parent in parents] for parents in pop]
_vars = [var_defs[e] for e in list_of_vars]
_xl, _xu = None, None
if clazz in [Real, Integer]:
_xl, _xu = np.array([v.bounds for v in _vars]).T
_problem = Problem(vars=_vars, xl=_xl, xu=_xu)
while True:
_off = crossover(_problem, _parents, **kwargs)
mutation = self.mutation[clazz]
_off = mutation(_problem, _off, **kwargs)
# Sometimes NaN's might sneak into the outputs, try again if this is the case
x_off = _off.get('X')[:n_offsprings, :]
if np.any(np.isnan(x_off)):
continue
break
x_out[:, x_idx] = x_off
return Population.new(X=x_out)
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/pymoo_interface/md_mating.py
| 0.773858 | 0.441131 |
md_mating.py
|
pypi
|
import os
import pickle
import logging
import numpy as np
import pandas as pd
from typing import Optional
from pymoo.core.result import Result
from pymoo.core.callback import Callback
from pymoo.core.algorithm import Algorithm
from pymoo.core.evaluator import Evaluator
from pymoo.core.population import Population, Individual
from pymoo.core.initialization import Initialization
from sb_arch_opt.util import capture_log
from sb_arch_opt.problem import ArchOptProblemBase
from sb_arch_opt.sampling import LargeDuplicateElimination
__all__ = ['load_from_previous_results', 'initialize_from_previous_results', 'ResultsStorageCallback',
'ArchOptEvaluator']
log = logging.getLogger('sb_arch_opt.pymoo')
def load_from_previous_results(problem: ArchOptProblemBase, result_folder: str) -> Optional[Population]:
"""Load a (cumulative) Population from previously-stored results"""
capture_log()
# Try to load using problem-specific function first
population = problem.load_previous_results(result_folder)
if population is not None and len(population) > 0:
log.info(f'Previous results loaded from problem results: {len(population)} design points')
# Additionally try to load from pymoo storage to merge with non-evaluated design points
pymoo_population = ArchOptEvaluator.load_pop(result_folder)
if pymoo_population is not None and len(pymoo_population) > 0:
if population is None:
log.info(f'Previous results loaded from pymoo results: {len(pymoo_population)} design points')
population = pymoo_population
elif len(pymoo_population) > len(population):
unique_points = LargeDuplicateElimination().do(pymoo_population, population, to_itself=False)
if len(unique_points) > 0:
log.info(f'Merged additional design points from pymoo results: {len(unique_points)} design points')
population = Population.merge(population, unique_points)
if population is None:
return
# Set evaluated flags
def _set_eval(ind: Individual):
nonlocal n_evaluated
# Assume evaluated but failed points have Inf as output values
is_eval = ~np.all(np.isnan(ind.get('F')))
if is_eval:
ind.evaluated.update({'X', 'F', 'G', 'H'})
n_evaluated += 1
n_evaluated = 0
population.apply(_set_eval)
log.info(f'Evaluation status: {n_evaluated} of {len(population)} ({(n_evaluated/len(population))*100:.1f}%) '
f'are already evaluated')
return population
def initialize_from_previous_results(algorithm: Algorithm, problem: ArchOptProblemBase, result_folder: str) -> bool:
"""Initialize an Algorithm from previously stored results"""
capture_log()
if not hasattr(algorithm, 'initialization'):
raise RuntimeError(f'Algorithm has no initialization step, cannot set initial population: {algorithm!r}')
# Try to load from previous results
population = load_from_previous_results(problem, result_folder)
if population is None:
log.info(f'No previous population found, not changing initialization strategy')
return False
# Set static initialization on the algorithm to start from the loaded population
algorithm.initialization = Initialization(population)
# Initialize cumulative population
if isinstance(algorithm.evaluator, ArchOptEvaluator):
algorithm.evaluator.initialize_cumulative(population)
return True
class ResultsStorageCallback(Callback):
"""
Optimization callback that stores final optimization results in pymoo_results.pkl
"""
def __init__(self, results_folder: str, callback=None):
self.results_folder = results_folder
os.makedirs(results_folder, exist_ok=True)
self.callback = callback
super().__init__()
def initialize(self, algorithm: Algorithm):
# Hook into the results function to store final results
result_func = algorithm.result
def wrapped_result():
result = result_func()
# Store pymoo results
if result.algorithm is not None:
result.algorithm.callback = None
try:
self._store_results(result)
except MemoryError:
result.history = None
result.algorithm = None
try:
self._store_results(result)
except MemoryError:
log.info('Could not store pymoo result object: MemoryError')
return result
algorithm.result = wrapped_result
def _store_results(self, result: Result):
with open(os.path.join(self.results_folder, 'pymoo_results.pkl'), 'wb') as fp:
pickle.dump(result, fp)
def __call__(self, *args, **kwargs):
super().__call__(*args, **kwargs)
if self.callback is not None:
self.callback(*args, **kwargs)
class ArchOptEvaluator(Evaluator):
"""
Evaluate that adds some optional functionalities useful for architecture optimization:
- It implements the extreme barrier approach for dealing with hidden constraints: NaN outputs are replaced by Inf
- It stores intermediate results during evaluation, which also allows results to be stored during a large DoE for
example, instead of only when an algorithm makes a new iteration
Batch process size is determined using `get_n_batch_evaluate` if not specified explicitly!
Also using the ResultsStorageCallback ensures that also final problem-specific results are stored.
"""
def __init__(self, *args, results_folder: str = None, n_batch=None, **kwargs):
self.results_folder = results_folder
if results_folder is not None:
os.makedirs(results_folder, exist_ok=True)
self.n_batch = n_batch
self._cumulative_pop = None
super().__init__(*args, **kwargs)
self._evaluated_pop = None
self._non_eval_cumulative_pop = None
def initialize_cumulative(self, cumulative_pop: Population):
# Set cumulative population and correct the nr of evaluations
self._cumulative_pop = cumulative_pop
self.n_eval = len(self._get_idx_evaluated(cumulative_pop))
def eval(self, problem, pop: Population, skip_already_evaluated: bool = None, evaluate_values_of: list = None,
count_evals: bool = True, **kwargs):
# Get pop being skipped in order to complete the intermediate storage
skip_already_evaluated = self.skip_already_evaluated if skip_already_evaluated is None else skip_already_evaluated
self._evaluated_pop = None
if skip_already_evaluated:
i_evaluated = self._get_idx_evaluated(pop, evaluate_values_of=evaluate_values_of)
self._evaluated_pop = pop[i_evaluated]
# Get portion of the cumulative population that is currently not under evaluation
self._non_eval_cumulative_pop = None
if self._cumulative_pop is not None:
is_duplicate = LargeDuplicateElimination.eliminate(self._cumulative_pop.get('X'), pop.get('X'))
self._non_eval_cumulative_pop = self._cumulative_pop[~is_duplicate]
results = super().eval(problem, pop, skip_already_evaluated=skip_already_evaluated,
evaluate_values_of=evaluate_values_of, count_evals=count_evals, **kwargs)
# Post-evaluation storage
if self.results_folder is not None:
self._store_intermediate(problem, pop)
self._non_eval_cumulative_pop = None
return results
def _get_idx_evaluated(self, pop: Population, evaluate_values_of: list = None):
evaluate_values_of = self.evaluate_values_of if evaluate_values_of is None else evaluate_values_of
return [i for i, ind in enumerate(pop) if all([e in ind.evaluated for e in evaluate_values_of])]
def _eval(self, problem, pop, evaluate_values_of, **kwargs):
if self.results_folder is None:
super()._eval(problem, pop, evaluate_values_of, **kwargs)
else:
# Evaluate in batch and store intermediate results
n_batch = self.n_batch
if n_batch is None and isinstance(problem, ArchOptProblemBase):
n_batch = problem.get_n_batch_evaluate()
if n_batch is None:
n_batch = 1 # Assume there is no batch processing, and we want to save after every evaluation
for i_batch in range(0, len(pop), n_batch):
batch_pop = pop[i_batch:i_batch+n_batch]
super()._eval(problem, batch_pop, evaluate_values_of, **kwargs)
self._apply_extreme_barrier(batch_pop)
intermediate_pop = self._normalize_pop(pop, evaluate_values_of, evaluated_pop=self._evaluated_pop)
self._store_intermediate(problem, intermediate_pop)
# Apply extreme barrier: replace NaN with Inf
self._apply_extreme_barrier(pop)
return pop
@staticmethod
def _apply_extreme_barrier(pop: Population):
for key in ['F', 'G', 'H']:
values = pop.get(key)
values[np.isnan(values)] = np.inf
pop.set(key, values)
@staticmethod
def _normalize_pop(pop: Population, evaluate_values_of, evaluated_pop: Population = None) -> Population:
"""Ensure that the matrices in a Population are two-dimensional"""
pop_data = {}
for key in (['X']+evaluate_values_of):
data = pop.get(key, to_numpy=False)
partial_data = np.zeros((len(data), len(data[0])))*np.nan
for i, row in enumerate(data):
if row is not None and len(row) > 0:
partial_data[i, :] = row
data = partial_data
pop_data[key] = data
normalized_pop = Population.new(**pop_data)
if evaluated_pop is not None:
normalized_pop = Population.merge(evaluated_pop, normalized_pop)
return normalized_pop
def _store_intermediate(self, problem, pop: Population):
# Store pymoo population
self._store_pop(pop)
# Store cumulative pymoo population
if self._non_eval_cumulative_pop is not None:
unique_non_eval_pop = LargeDuplicateElimination().do(self._non_eval_cumulative_pop, pop, to_itself=False)
self._cumulative_pop = Population.merge(unique_non_eval_pop, pop)
else:
self._cumulative_pop = pop
self._store_pop(self._cumulative_pop, cumulative=True)
# Store problem-specific results
self._store_intermediate_problem(problem)
def _store_pop(self, pop: Population, cumulative=False):
with open(self._get_pop_file_path(self.results_folder, cumulative=cumulative), 'wb') as fp:
pickle.dump(pop, fp)
if len(pop) > 0:
cumulative_str = '_cumulative' if cumulative else ''
csv_path = os.path.join(self.results_folder, f'pymoo_population{cumulative_str}.csv')
self.get_pop_as_df(pop).to_csv(csv_path)
@staticmethod
def get_pop_as_df(pop: Population) -> pd.DataFrame:
cols = []
all_data = []
for symbol in ['x', 'f', 'g', 'h']:
data = pop.get(symbol.upper())
all_data.append(data)
cols += [f'{symbol}{i}' for i in range(data.shape[1])]
return pd.DataFrame(columns=cols, data=np.column_stack(all_data))
def _store_intermediate_problem(self, problem):
if isinstance(problem, ArchOptProblemBase):
problem.store_results(self.results_folder)
@classmethod
def load_pop(cls, results_folder: str) -> Optional[Population]:
pop_path = cls._get_pop_file_path(results_folder, cumulative=True)
if not os.path.exists(pop_path):
pop_path = cls._get_pop_file_path(results_folder)
if not os.path.exists(pop_path):
return
with open(pop_path, 'rb') as fp:
pop = pickle.load(fp)
if not isinstance(pop, Population):
raise ValueError(f'Loaded population not of type Population ({pop_path}): {pop!r}')
return pop
@staticmethod
def _get_pop_file_path(results_folder, cumulative=False) -> str:
cumulative_str = '_cumulative' if cumulative else ''
return os.path.join(results_folder, f'pymoo_population{cumulative_str}.pkl')
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/pymoo_interface/storage_restart.py
| 0.865665 | 0.328395 |
storage_restart.py
|
pypi
|
import logging
from pymoo.core.algorithm import Algorithm
from pymoo.algorithms.moo.nsga2 import NSGA2, RankAndCrowdingSurvival
from pymoo.termination.max_eval import MaximumFunctionCallTermination
from sb_arch_opt.sampling import *
from sb_arch_opt.util import capture_log
from sb_arch_opt.problem import ArchOptRepair
from sb_arch_opt.algo.pymoo_interface.metrics import *
from sb_arch_opt.algo.pymoo_interface.md_mating import *
from sb_arch_opt.algo.pymoo_interface.storage_restart import *
__all__ = ['provision_pymoo', 'ArchOptNSGA2', 'get_nsga2', 'initialize_from_previous_results', 'ResultsStorageCallback',
'ArchOptEvaluator', 'get_default_termination', 'DeltaHVTermination', 'ArchOptEvaluator',
'load_from_previous_results', 'get_doe_algo', 'DOEAlgorithm']
log = logging.getLogger('sb_arch_opt.pymoo')
def provision_pymoo(algorithm: Algorithm, set_init=True, results_folder=None):
"""
Provisions a pymoo Algorithm to work correctly for architecture optimization:
- Sets initializer using a repaired sampler (if `set_init = True`)
- Sets a repair operator
- Optionally stores intermediate and final results in some results folder
- Replace NaN outputs with Inf
"""
capture_log()
if set_init and hasattr(algorithm, 'initialization'):
algorithm.initialization = get_init_sampler()
if hasattr(algorithm, 'repair'):
algorithm.repair = ArchOptRepair()
if results_folder is not None:
algorithm.callback = ResultsStorageCallback(results_folder, callback=algorithm.callback)
algorithm.evaluator = ArchOptEvaluator(results_folder=results_folder)
return algorithm
class ArchOptNSGA2(NSGA2):
"""NSGA2 preconfigured with mixed-variable operators and other architecture optimization measures"""
def __init__(self,
pop_size=100,
sampling=HierarchicalSampling(),
repair=ArchOptRepair(),
mating=MixedDiscreteMating(repair=ArchOptRepair(), eliminate_duplicates=LargeDuplicateElimination()),
eliminate_duplicates=LargeDuplicateElimination(),
survival=RankAndCrowdingSurvival(),
output=EHVMultiObjectiveOutput(),
results_folder=None,
**kwargs):
evaluator = ArchOptEvaluator(results_folder=results_folder)
callback = ResultsStorageCallback(results_folder) if results_folder is not None else None
super().__init__(pop_size=pop_size, sampling=sampling, repair=repair, mating=mating,
eliminate_duplicates=eliminate_duplicates, survival=survival, output=output,
evaluator=evaluator, callback=callback, **kwargs)
def get_nsga2(pop_size: int, results_folder=None, **kwargs):
"""Returns a NSGA2 algorithm preconfigured to work with mixed-discrete variables and other architecture optimization
measures"""
capture_log()
return ArchOptNSGA2(pop_size=pop_size, results_folder=results_folder, **kwargs)
class DOEAlgorithm(ArchOptNSGA2):
"""Algorithm that stops after initialization"""
def has_next(self):
return not self.is_initialized
def _infill(self):
raise RuntimeError('Infill should not be called!')
def get_doe_algo(doe_size: int, results_folder=None, **kwargs):
"""Returns an algorithm preconfigured for architecture optimization that will only run a DOE. Useful when
evaluations is expensive and more inspection is needed before continuing with optimization"""
capture_log()
algo = DOEAlgorithm(pop_size=doe_size, results_folder=results_folder, **kwargs)
algo.termination = MaximumFunctionCallTermination(n_max_evals=doe_size)
return algo
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/pymoo_interface/api.py
| 0.820685 | 0.258124 |
api.py
|
pypi
|
import os
import logging
import numpy as np
from typing import Tuple
from sb_arch_opt.sampling import *
from sb_arch_opt.util import capture_log
from pymoo.core.population import Population
from sb_arch_opt.problem import ArchOptProblemBase
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
try:
from segomoe.sego import Sego
from segomoe.constraint import Constraint
from segomoe.sego_defs import get_sego_file_map, ExitStatus
from sb_arch_opt.algo.arch_sbo.models import ModelFactory
HAS_SEGOMOE = True
except ImportError:
HAS_SEGOMOE = False
__all__ = ['HAS_SEGOMOE', 'check_dependencies', 'SEGOMOEInterface']
log = logging.getLogger('sb_arch_opt.segomoe')
def check_dependencies():
if not HAS_SEGOMOE:
raise ImportError(f'SEGOMOE not installed!')
class SEGOMOEInterface:
"""
Class for interfacing with SEGOMOE
"""
def __init__(self, problem: ArchOptProblemBase, results_folder: str, n_init: int, n_infill: int, use_moe=True,
sego_options=None, model_options=None, verbose=True):
check_dependencies()
self._problem = problem
self._results_folder = results_folder
self.n_init = n_init
self.n_infill = n_infill
self.use_moe = use_moe
self.sego_options = sego_options or {}
self.model_options = model_options or {}
self.verbose = verbose
self._x = None
self._x_failed = None
self._y = None
@property
def x(self) -> np.ndarray:
"""Design vectors"""
if self._x is None:
return np.zeros((0, self._problem.n_var))
return self._x
@property
def n(self) -> int:
"""Number of available successfully evaluated points"""
return 0 if self._x is None else self._x.shape[0]
@property
def x_failed(self) -> np.ndarray:
"""Design vectors"""
if self._x_failed is None:
return np.zeros((0, self._problem.n_var))
return self._x_failed
@property
def n_failed(self) -> int:
"""Number of failed points"""
return 0 if self._x_failed is None else self._x_failed.shape[0]
@property
def n_tried(self):
"""Number of points that were tried to be evaluated (n + n_failed)"""
return self.n + self.n_failed
@property
def y(self) -> np.ndarray:
"""All outputs: f, g, h"""
if self._y is None:
p = self._problem
return np.zeros((0, p.n_obj + p.n_ieq_constr + p.n_eq_constr))
return self._y
@property
def f(self) -> np.ndarray:
"""Objective values"""
f, _, _ = self._split_y(self.y)
return f
@property
def g(self) -> np.ndarray:
"""Inequality constraints"""
_, g, _ = self._split_y(self.y)
return g
@property
def h(self) -> np.ndarray:
"""Equality constraints"""
_, _, h = self._split_y(self.y)
return h
@property
def pop(self) -> Population:
"""Population of all evaluated points"""
return self.get_population(self.x, self.y)
@property
def opt(self) -> Population:
"""Optimal points (Pareto front if multi-objective)"""
return self._get_pareto_front(self.pop)
def initialize_from_previous(self, results_folder: str = None):
capture_log()
if results_folder is None:
results_folder = self._results_folder
# Load from problem state
population = self._problem.load_previous_results(results_folder)
if population is not None:
self._x, self._x_failed, self._y = self._get_xy(population)
log.info(f'Previous results loaded from problem results: {len(population)} design points '
f'({self.n} ok, {self.n_failed} failed)')
return
# Load from optimizer state
x_path, y_path, x_failed_path = self._get_doe_paths()
if os.path.exists(x_path) and os.path.exists(y_path):
self._x = np.load(x_path)
if os.path.exists(x_failed_path):
self._x_failed = np.load(x_failed_path)
else:
self._x_failed = np.zeros((0, self._problem.n_var))
# Flip inequality constraints, as the problem defines satisfaction G <= 0 but SEGOMOE saves it as opposite
self._y = self._flip_g(np.load(y_path))
log.info(f'Previous results loaded from optimizer state: {self._x.shape[0]} design points '
f'({self.n} ok, {self.n_failed} failed)')
return
log.info('No previous results found')
def run_optimization(self):
capture_log()
# Automatically initialize from previous results if reusing the same storage folder
if self._x is None:
self.initialize_from_previous()
# Run DOE if needed
n_available = self.n_tried
if n_available < self.n_init:
log.info(f'Running DOE of {self.n_init-n_available} points ({self.n_init} total)')
self.run_doe(self.n_init-n_available)
# Run optimization
n_available = self.n_tried
if n_available < self.n_init+self.n_infill:
n_infills = self.n_infill - (n_available-self.n_init)
log.info(f'Running optimization: {n_infills} infill points (ok DOE points: {self.n})')
self.run_infills(n_infills)
# Save final results and return Pareto front
self._save_results()
return self.opt
def run_doe(self, n: int = None):
if n is None:
n = self.n_init
x_doe = self._sample_doe(n)
self._x, self._x_failed, self._y = self._get_xy(self._evaluate(x_doe))
if self._x.shape[0] < 2:
log.info(f'Not enough points sampled ({self._x.shape[0]} success, {self._x_failed.shape[0]} failed),'
f'problems with model fitting can be expected')
self._save_results()
def _sample_doe(self, n: int) -> np.ndarray:
return HierarchicalSampling().do(self._problem, n).get('X')
def run_infills(self, n_infills: int = None):
if n_infills is None:
n_infills = self.n_infill
for i in range(n_infills):
# Ask for a new infill point
log.info(f'Getting new infill point {i+1}/{n_infills} (point {self._x.shape[0]+1} overall)')
x = self._ask_infill()
# Evaluate and impute
log.info(f'Evaluating point {i+1}/{n_infills} (point {self._x.shape[0]+1} overall)')
x, x_failed, y = self._get_xy(self._evaluate(np.array([x])))
# Update and save DOE
self._x = np.row_stack([self._x, x])
self._y = np.row_stack([self._y, y])
self._x_failed = np.row_stack([self._x_failed, x_failed])
self._save_results()
def _ask_infill(self) -> np.ndarray:
"""
Ask for one infill point, we do this in order to support imputation of the design vector.
Implementation inspired by:
https://github.com/OneraHub/WhatsOpt/blob/master/services/whatsopt_server/optimizer_store/segomoe_optimizer.py
https://github.com/OneraHub/WhatsOpt/blob/master/services/whatsopt_server/optimizer_store/segmoomoe_optimizer.py
"""
def _dummy_f_grouped(_):
return np.max(self._y, axis=1), False
sego = self._get_sego(_dummy_f_grouped)
res = sego.run_optim(n_iter=1)
if res is not None and res[0] == ExitStatus.runtime_error[0]:
raise RuntimeError(f'Error during SEGOMOE infill search: {res[0]}')
# Return latest point as suggested infill point
return sego.get_x(i=-1)
def _get_sego(self, f_grouped):
design_space_spec = self._get_design_space()
model_type = {
'type': 'MIXEDsmt' if design_space_spec.is_mixed_discrete else 'KRGsmt',
'regr': 'constant',
'corr': 'squar_exp',
'theta0': [1e-3],
'thetaL': [1e-6],
'thetaU': [10.],
'normalize': True,
**self.model_options,
}
if design_space_spec.is_mixed_discrete:
raise RuntimeError('Mixed-discrete API currently not supported')
# model_type['xtypes'] = design_space_spec.var_types
# model_type['xlimits'] = design_space_spec.var_limits
optim_settings = {
'grouped_eval': True,
'n_obj': self._problem.n_obj,
'model_type': {'obj': model_type, 'con': model_type},
'n_clusters': 0 if self.use_moe else 1,
'optimizer': 'slsqp',
'analytical_diff': False,
'profiling': False,
'verbose': self.verbose,
'cst_crit': 'MC',
**self.sego_options,
}
return Sego(
fun=f_grouped,
var=design_space_spec.var_defs,
const=self._get_constraints(),
optim_settings=optim_settings,
path_hs=self._results_folder,
comm=None,
)
def _get_design_space(self):
return ModelFactory(self._problem).get_smt_design_space_spec()
def _get_constraints(self):
constraints = []
for i in range(self._problem.n_ieq_constr):
constraints.append(Constraint(con_type='<', bound=0., name=f'g{i}'))
for i in range(self._problem.n_eq_constr):
constraints.append(Constraint(con_type='=', bound=0., name=f'h{i}'))
return constraints
def _save_results(self):
x_path, y_path, x_failed_path = self._get_doe_paths()
if self._x is not None:
np.save(x_path, self._x)
if self._y is not None:
# Flip inequality constraints, as SEGOMOE stores them as G >= 0, however the problem defines it as opposite
np.save(y_path, self._flip_g(self._y))
if self._x_failed is not None and self._x_failed.shape[0] > 0:
np.save(x_failed_path, self._x_failed)
elif os.path.exists(x_failed_path):
os.remove(x_failed_path)
self._problem.store_results(self._results_folder)
def _get_doe_paths(self):
return self._get_sego_file_path('x'), self._get_sego_file_path('y'), self._get_sego_file_path('x_fails')
def _get_sego_file_path(self, key):
return os.path.join(self._results_folder, get_sego_file_map()[key])
def _evaluate(self, x: np.ndarray) -> Population:
"""
Evaluates a list of design points (x is a matrix of size n x nx). A population is returned with matrices:
- X: imputed design vectors
- is_active: activeness vectors (booleans defining which design variable is active in each design vector)
- F: objective values
- G: inequality constraints (None if there are no inequality constraints)
- H: equality constraints (None if there are no equality constraints)
"""
out = self._problem.evaluate(x, return_as_dictionary=True)
return Population.new(**out)
def _get_xy(self, population: Population) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Concatenate evaluation outputs (F, G, H) and split x into evaluated and failed points.
Returns: x, x_failed, y"""
# Concatenate outputs
outputs = [population.get('F')]
if self._problem.n_ieq_constr > 0:
outputs.append(population.get('G'))
if self._problem.n_eq_constr > 0:
outputs.append(population.get('H'))
y = np.column_stack(outputs)
# Split x into ok and failed points
x = population.get('X')
is_failed = self._problem.get_failed_points(population)
x_failed = x[is_failed, :]
x = x[~is_failed, :]
y = y[~is_failed, :]
return x, x_failed, y
def _split_y(self, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Split outputs (y) into F, G, H"""
f, y = np.split(y, [self._problem.n_obj], axis=1)
if self._problem.n_ieq_constr > 0:
g, y = np.split(y, [self._problem.n_ieq_constr], axis=1)
else:
g = np.zeros((y.shape[0], 0))
if self._problem.n_eq_constr > 0:
h = y[:, :self._problem.n_eq_constr]
else:
h = np.zeros((y.shape[0], 0))
return f, g, h
def _flip_g(self, y: np.ndarray):
f, g, h = self._split_y(y)
g = -g
return np.column_stack([f, g, h])
def get_population(self, x: np.ndarray, y: np.ndarray) -> Population:
# Inequality constraint values are flipped to correctly calculate constraint violation values in pymoo
f, g, h = self._split_y(y)
kwargs = {'X': x, 'F': f, 'G': g, 'H': h}
pop = Population.new(**kwargs)
return pop
@staticmethod
def _get_pareto_front(population: Population) -> Population:
f = population.get('F')
if f.shape[0] == 0:
return population.copy()
f = f[population.get('feas')]
if f.shape[0] == 0:
return population.copy()
i_nds = NonDominatedSorting().do(f, only_non_dominated_front=True)
return population[i_nds]
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/algo/segomoe_interface/algo.py
| 0.809238 | 0.223568 |
algo.py
|
pypi
|
import numpy as np
from pymoo.core.variable import Real
from pymoo.problems.single.himmelblau import Himmelblau as HB
from pymoo.problems.single.rosenbrock import Rosenbrock as RB
from pymoo.problems.single.griewank import Griewank as GW
from sb_arch_opt.problems.problems_base import *
__all__ = ['Himmelblau', 'Rosenbrock', 'Griewank', 'Goldstein', 'Branin']
class Himmelblau(NoHierarchyWrappedProblem):
def __init__(self):
super().__init__(HB())
class Rosenbrock(NoHierarchyWrappedProblem):
def __init__(self, n_var=10):
super().__init__(RB(n_var=n_var))
class Griewank(NoHierarchyWrappedProblem):
def __init__(self):
super().__init__(GW(n_var=10))
class Goldstein(NoHierarchyProblemBase):
"""Goldstein-Price test problem, implementation based on
https://github.com/scipy/scipy/blob/main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_G.py#L88"""
def __init__(self):
des_vars = [Real(bounds=(-2, 2)) for _ in range(2)]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
a = (1 + (x[:, 0] + x[:, 1] + 1) ** 2
* (19 - 14 * x[:, 0] + 3 * x[:, 0] ** 2
- 14 * x[:, 1] + 6 * x[:, 0] * x[:, 1] + 3 * x[:, 1] ** 2))
b = (30 + (2 * x[:, 0] - 3 * x[:, 1]) ** 2
* (18 - 32 * x[:, 0] + 12 * x[:, 0] ** 2
+ 48 * x[:, 1] - 36 * x[:, 0] * x[:, 1] + 27 * x[:, 1] ** 2))
f_out[:, 0] = a*b
class Branin(NoHierarchyProblemBase):
"""
Branin test function from:
Forrester, A., Sobester, A., & Keane, A. (2008). Engineering design via surrogate modelling: a practical guide.
"""
_des_vars = [
Real(bounds=(0, 1)), Real(bounds=(0, 1)),
]
def __init__(self):
super().__init__(self._des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
for i in range(x.shape[0]):
f_out[i, 0] = self._h(x[i, 0], x[i, 1])
@staticmethod
def _h(x1, x2):
t1 = (15*x2 - (5/(4*np.pi**2))*(15*x1-5)**2 + (5/np.pi)*(15*x1-5) - 6)**2
t2 = 10*(1-1/(8*np.pi))*np.cos(15*x1-5) + 10
return ((t1+t2)-54.8104)/51.9496
if __name__ == '__main__':
Himmelblau().print_stats()
Rosenbrock().print_stats()
Griewank().print_stats()
Goldstein().print_stats()
Branin().print_stats()
# Branin().plot_design_space()
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/problems/continuous.py
| 0.782205 | 0.482734 |
continuous.py
|
pypi
|
import numpy as np
from pymoo.core.variable import Real
from pymoo.problems.multi.zdt import ZDT1
from pymoo.problems.single.himmelblau import Himmelblau as HB
from sb_arch_opt.problems.continuous import *
from sb_arch_opt.problems.problems_base import *
__all__ = ['MOHimmelblau', 'MDMOHimmelblau', 'DMOHimmelblau', 'MOGoldstein', 'MDMOGoldstein',
'DMOGoldstein', 'MOZDT1', 'MDZDT1', 'DZDT1', 'MDZDT1Small', 'MDZDT1Mid', 'MORosenbrock', 'MDMORosenbrock']
class MOHimmelblau(NoHierarchyProblemBase):
"""Multi-objective version of the Himmelblau test problem"""
def __init__(self):
self._problem = problem = HB()
des_vars = [Real(bounds=(problem.xl[i], problem.xu[i])) for i in range(problem.n_var)]
super().__init__(des_vars, n_obj=2)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
f_out[:, 0] = self._problem.evaluate(x, return_as_dictionary=True)['F'][:, 0]
f_out[:, 1] = self._problem.evaluate(x[:, ::-1], return_as_dictionary=True)['F'][:, 0]
class MDMOHimmelblau(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the multi-objective Himmelblau test problem"""
def __init__(self):
super().__init__(MOHimmelblau(), n_vars_int=1)
class DMOHimmelblau(MixedDiscretizerProblemBase):
"""Discrete version of the multi-objective Himmelblau test problem"""
def __init__(self):
super().__init__(MOHimmelblau())
class MOGoldstein(NoHierarchyProblemBase):
"""Multi-objective version of the Goldstein test problem"""
def __init__(self):
self._problem = problem = Goldstein()
super().__init__(problem.des_vars, n_obj=2)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
f_out[:, 0] = self._problem.evaluate(x, return_as_dictionary=True)['F'][:, 0]
f_out[:, 1] = -self._problem.evaluate(x+.25, return_as_dictionary=True)['F'][:, 0]
class MDMOGoldstein(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the multi-objective Goldstein test problem"""
def __init__(self):
super().__init__(MOGoldstein(), n_vars_int=1)
class DMOGoldstein(MixedDiscretizerProblemBase):
"""Discrete version of the multi-objective Goldstein test problem"""
def __init__(self):
super().__init__(MOGoldstein())
class MORosenbrock(NoHierarchyProblemBase):
"""Multi-objective version of the Rosenbrock problem"""
def __init__(self, n_var=10):
self._rosenbrock = problem = Rosenbrock(n_var=n_var)
des_vars = problem.des_vars
super().__init__(des_vars, n_obj=2)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
out = self._rosenbrock.evaluate(x, return_as_dictionary=True)
f_out[:, 0] = f1 = out['F'][:, 0]
f_out[:, 1] = .1*(np.abs((6000-f1)/40)**2 + np.sum((x[:, :4]+1)**2*2000, axis=1))
class MDMORosenbrock(MixedDiscretizerProblemBase):
"""Mixed-discrete multi-objective Rosenbrock problem"""
def __init__(self):
super().__init__(MORosenbrock(), n_opts=4, n_vars_int=5)
class MDZDT1Small(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the multi-objective ZDT1 test problem"""
def __init__(self):
super().__init__(ZDT1(n_var=12), n_opts=3, n_vars_int=6)
class MDZDT1Mid(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the multi-objective ZDT1 test problem"""
def __init__(self):
super().__init__(ZDT1(n_var=20), n_opts=3, n_vars_int=10)
class MOZDT1(NoHierarchyWrappedProblem):
"""Wrapper for ZDT1 test problem"""
def __init__(self):
super().__init__(ZDT1())
class MDZDT1(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the multi-objective ZDT1 test problem"""
def __init__(self):
super().__init__(ZDT1(), n_opts=5, n_vars_int=15)
class DZDT1(MixedDiscretizerProblemBase):
"""Discrete version of the multi-objective ZDT1 test problem"""
def __init__(self):
super().__init__(ZDT1(), n_opts=5)
if __name__ == '__main__':
# MOHimmelblau().print_stats()
# MDMOHimmelblau().print_stats()
# MDMOHimmelblau().plot_design_space()
# DMOHimmelblau().print_stats()
# # MOHimmelblau().plot_pf()
# # MDMOHimmelblau().plot_pf()
# DMOHimmelblau().plot_pf()
# MOGoldstein().print_stats()
# MOGoldstein().plot_design_space()
# MDMOGoldstein().print_stats()
# MDMOGoldstein().plot_design_space()
# DMOGoldstein().print_stats()
# DMOGoldstein().plot_design_space()
# # MOGoldstein().plot_pf()
# # MDMOGoldstein().plot_pf()
# DMOGoldstein().plot_pf()
MORosenbrock().print_stats()
# MORosenbrock().plot_pf()
# MDMORosenbrock().print_stats()
# MDMORosenbrock().plot_pf()
# MOZDT1().print_stats()
# MDZDT1().print_stats()
# MDZDT1Small().print_stats()
# MDZDT1Mid().print_stats()
# DZDT1().print_stats()
# # MOZDT1().plot_pf()
# # MDZDT1().plot_pf()
# DZDT1().plot_pf()
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/problems/md_mo.py
| 0.712432 | 0.339472 |
md_mo.py
|
pypi
|
import itertools
import numpy as np
from typing import Optional, Tuple
from pymoo.core.problem import Problem
from pymoo.core.variable import Real, Integer
from sb_arch_opt.problem import ArchOptProblemBase
from sb_arch_opt.pareto_front import CachedParetoFrontMixin
from sb_arch_opt.sampling import HierarchicalExhaustiveSampling
__all__ = ['ArchOptTestProblemBase', 'NoHierarchyProblemBase', 'NoHierarchyWrappedProblem', 'MixedDiscretizerProblemBase']
class ArchOptTestProblemBase(CachedParetoFrontMixin, ArchOptProblemBase):
"""Helper class to extend the ArchOptProblemBase with Pareto front caching"""
def might_have_hidden_constraints(self):
"""For the test problems we know which ones have hidden constraints"""
return False
def plot_design_space(self, ix_plot=None, x_base=None, n=200, show=True):
import matplotlib.pyplot as plt
from matplotlib.colors import CenteredNorm
if ix_plot is None:
ix_plot = (0, 1)
ix, iy = ix_plot
x_name, y_name = f'$x_{ix}$', f'$x_{iy}$'
x_lim, y_lim = (self.xl[ix], self.xu[ix]), (self.xl[iy], self.xu[iy])
x, y = np.linspace(x_lim[0], x_lim[1], n), np.linspace(y_lim[0], y_lim[1], n)
xx, yy = np.meshgrid(x, y)
if x_base is None:
x_base = .5*(self.xl+self.xu)
x_eval = np.zeros((xx.size, len(x_base)))
x_eval[:, :] = x_base
x_eval[:, ix] = xx.ravel()
if self.n_var > 1:
x_eval[:, iy] = yy.ravel()
out = self.evaluate(x_eval, return_as_dictionary=True)
def _plot_out(z, z_name, is_constraint=False):
zz = z.reshape(xx.shape)
plt.figure(), plt.title(f'{self!r}\nmin = {np.nanmin(z)}')
plt.fill_between(x_lim, y_lim[0], y_lim[1], facecolor='none', hatch='X', edgecolor='r', linewidth=0)
cmap = 'RdBu_r' if is_constraint else 'summer'
kwargs = {}
if is_constraint:
kwargs['norm'] = CenteredNorm()
c = plt.contourf(xx, yy, zz, 50, cmap=cmap, **kwargs)
plt.contour(xx, yy, zz, 10, colors='k', linewidths=.5, **kwargs)
if is_constraint:
plt.contour(xx, yy, zz, [0], colors='k', linewidths=3)
plt.colorbar(c).set_label(z_name)
plt.xlabel(x_name), plt.xlim(x_lim)
plt.ylabel(y_name), plt.ylim(y_lim)
plt.tight_layout()
for if_ in range(self.n_obj):
_plot_out(out['F'][:, if_], f'$f_{if_}$')
for ig in range(self.n_ieq_constr):
_plot_out(out['G'][:, ig], f'$g_{ig}$', is_constraint=True)
for ih in range(self.n_eq_constr):
_plot_out(out['H'][:, ih], f'$h_{ih}$', is_constraint=True)
if show:
plt.show()
class NoHierarchyProblemBase(ArchOptTestProblemBase):
"""Base class for test problems that have no decision hierarchy"""
def _get_n_valid_discrete(self) -> int:
# No hierarchy, so the number of valid points is the same as the number of declared points
return self.get_n_declared_discrete()
def _get_n_active_cont_mean(self) -> int:
# No hierarchy, so the mean nr of active continuous dimensions is the same as the nr of continuous dimensions
return int(np.sum(self.is_cont_mask))
def _is_conditionally_active(self):
return [False]*self.n_var
def _gen_all_discrete_x(self) -> Optional[Tuple[np.ndarray, np.ndarray]]:
# No hierarchy, so we can just get the Cartesian product of discrete variables
x_values = HierarchicalExhaustiveSampling.get_exhaustive_sample_values(self, n_cont=1)
# Set some limit to what we want to generate
if np.prod([len(values) for values in x_values], dtype=float) > 1e6:
return
x_discrete = np.array(list(itertools.product(*x_values)))
is_active = np.ones(x_discrete.shape, dtype=bool)
return x_discrete, is_active
def __repr__(self):
return f'{self.__class__.__name__}()'
class NoHierarchyWrappedProblem(NoHierarchyProblemBase):
"""Base class for non-hierarchical test problems that wrap an existing Problem class (to add SBArchOpt features)"""
def __init__(self, problem: Problem, repr_str=None):
self._problem = problem
self._repr_str = repr_str
des_vars = [Real(bounds=(problem.xl[i], problem.xu[i])) for i in range(problem.n_var)]
super().__init__(des_vars, n_obj=problem.n_obj, n_ieq_constr=problem.n_ieq_constr)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
out = self._problem.evaluate(x, return_as_dictionary=True)
f_out[:, :] = out['F']
if self.n_ieq_constr > 0:
g_out[:, :] = out['G']
def __repr__(self):
if self._repr_str is not None:
return self._repr_str
return f'{self.__class__.__name__}()'
class MixedDiscretizerProblemBase(NoHierarchyProblemBase):
"""Problem class that turns an existing test problem into a mixed-discrete problem by mapping the first n (if not
given: all) variables to integers with a given number of options."""
def __init__(self, problem: Problem, n_opts=10, n_vars_int: int = None):
self.problem = problem
self.n_opts = n_opts
if n_vars_int is None:
n_vars_int = problem.n_var
self.n_vars_int = n_vars_int
if not problem.has_bounds():
raise ValueError('Underlying problem should have bounds defined!')
self._xl_orig = problem.xl
self._xu_orig = problem.xu
des_vars = [Integer(bounds=(0, n_opts-1)) if i < n_vars_int else Real(bounds=(problem.xl[i], problem.xu[i]))
for i in range(problem.n_var)]
super().__init__(des_vars, n_obj=problem.n_obj, n_ieq_constr=problem.n_ieq_constr)
self.callback = problem.callback
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
"""
Implement evaluation and write results in the provided output matrices:
- x (design vectors): discrete variables have integer values, imputed design vectors can be output here
- is_active (activeness): vector specifying for each design variable whether it was active or not
- f (objectives): written as a minimization
- g (inequality constraints): written as "<= 0"
- h (equality constraints): written as "= 0"
"""
n = self.n_vars_int
xl, xu = self.xl, self.xu
xl_orig, xu_orig = self._xl_orig, self._xu_orig
x_underlying = x.copy()
x_underlying[:, :n] = ((x_underlying[:, :n]-xl[:n])/(xu[:n]-xl[:n]))*(xu_orig[:n]-xl_orig[:n])+xl_orig[:n]
out = self.problem.evaluate(x_underlying, return_as_dictionary=True, *args, **kwargs)
f_out[:, :] = out['F']
if 'G' in out:
g_out[:, :] = out['G']
def _map_x(self, x: np.ndarray) -> np.ndarray:
x = np.copy(x)
xl, xu = self.xl, self.xu
xl_orig, xu_orig = self._xl_orig, self._xu_orig
n = self.n_vars_int
x[:, :n] = ((x[:, :n]-xl[:n])/(xu[:n]-xl[:n]))*(xu_orig[:n]-xl_orig[:n])+xl_orig[:n]
return x
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/problems/problems_base.py
| 0.870583 | 0.408454 |
problems_base.py
|
pypi
|
import numpy as np
from pymoo.core.variable import Real
from pymoo.problems.multi.osy import OSY
from pymoo.problems.multi.carside import Carside
from pymoo.problems.multi.welded_beam import WeldedBeam
from pymoo.problems.multi.dascmop import DASCMOP7, DIFFICULTIES
from pymoo.problems.single.cantilevered_beam import CantileveredBeam
from sb_arch_opt.problems.problems_base import *
__all__ = ['ArchCantileveredBeam', 'MDCantileveredBeam', 'ArchWeldedBeam', 'MDWeldedBeam', 'ArchCarside', 'MDCarside',
'ArchOSY', 'MDOSY', 'MODASCMOP', 'MDDASCMOP', 'ConBraninProd', 'ConBraninGomez']
class ArchCantileveredBeam(NoHierarchyWrappedProblem):
def __init__(self):
super().__init__(CantileveredBeam())
class MDCantileveredBeam(MixedDiscretizerProblemBase):
def __init__(self):
super().__init__(ArchCantileveredBeam(), n_vars_int=2)
class ArchWeldedBeam(NoHierarchyWrappedProblem):
"""Welded beam test problem: https://pymoo.org/problems/multi/welded_beam.html"""
def __init__(self):
super().__init__(WeldedBeam())
class MDWeldedBeam(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the welded beam test problem"""
def __init__(self):
super().__init__(ArchWeldedBeam(), n_vars_int=2)
class ArchCarside(NoHierarchyWrappedProblem):
"""Carside test problem"""
def __init__(self):
super().__init__(Carside())
class MDCarside(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the Carside test problem"""
def __init__(self):
super().__init__(ArchCarside(), n_vars_int=4)
class ArchOSY(NoHierarchyWrappedProblem):
"""OSY test problem: https://pymoo.org/problems/multi/osy.html"""
def __init__(self):
super().__init__(OSY())
class MDOSY(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the OSY test problem"""
def __init__(self):
super().__init__(ArchOSY(), n_vars_int=3)
class MODASCMOP(NoHierarchyWrappedProblem):
"""A particular instance of the DAS-CMOP 3-objective test problem:
https://pymoo.org/problems/constrained/dascmop.html"""
def __init__(self):
super().__init__(DASCMOP7(DIFFICULTIES[0]))
class MDDASCMOP(MixedDiscretizerProblemBase):
"""Mixed-discrete version of the DAS-CMOP test problem"""
def __init__(self):
super().__init__(MODASCMOP(), n_opts=3, n_vars_int=15)
class ConBraninBase(NoHierarchyProblemBase):
"""
Constrained Branin function from:
Parr, J., Holden, C.M., Forrester, A.I. and Keane, A.J., 2010. Review of efficient surrogate infill sampling
criteria with constraint handling.
"""
def __init__(self):
des_vars = [
Real(bounds=(-5, 10)),
Real(bounds=(0, 15)),
]
super().__init__(des_vars, n_ieq_constr=1)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
x_norm = (x+[5, 0])/15
for i in range(x.shape[0]):
f_out[i, 0] = self._h(x[i, 0], x[i, 1])
g_out[i, 0] = self._g(x_norm[i, 0], x_norm[i, 1])
@staticmethod
def _h(x1, x2):
t1 = (x2 - (5.1/(4*np.pi**2))*x1**2 + (5/np.pi)*x1 - 6)**2
t2 = 10*(1-1/(8*np.pi))*np.cos(x1) + 10
return t1 + t2 + 5*x2
def plot(self, show=True):
import matplotlib.pyplot as plt
xx1, xx2 = np.meshgrid(np.linspace(-5, 10, 100), np.linspace(0, 15, 100))
out = self.evaluate(np.column_stack([xx1.ravel(), xx2.ravel()]), return_as_dictionary=True)
zz = out['F'][:, 0]
zz[out['G'][:, 0] > 0] = np.nan
plt.figure(), plt.title(f'{self.__class__.__name__}')
plt.colorbar(plt.contourf(xx1, xx2, zz.reshape(xx1.shape), 50, cmap='inferno'))
plt.xlabel('$x_1$'), plt.ylabel('$x_2$')
if show:
plt.show()
def _g(self, x1, x2):
raise NotImplementedError
class ConBraninProd(ConBraninBase):
"""Constrained Branin problem with the product constraint (Eq. 14)"""
def _g(self, x1, x2):
return .2 - x1*x2
class ConBraninGomez(ConBraninBase):
"""Constrained Branin problem with the Gomez#3 constraint (Eq. 15)"""
def _g(self, x1, x2):
x1 = x1*2-1
x2 = x2*2-1
g = (4 - 2.1*x1**2 + (x1**4)/3)*x1**2 + x1*x2 + (-4 + 4*x2**2)*x2**2 + 3*np.sin(6*(1-x1)) + 3*np.sin(6*(1-x2))
return 6-g
if __name__ == '__main__':
# ArchCantileveredBeam().print_stats()
# ArchCantileveredBeam().plot_design_space()
# MDCantileveredBeam().print_stats()
# ArchWeldedBeam().print_stats()
# MDWeldedBeam().print_stats()
# # ArchWeldedBeam().plot_pf()
# # MDWeldedBeam().plot_pf()
# ArchCarside().print_stats()
# MDCarside().print_stats()
# # ArchCarside().plot_pf()
# MDCarside().plot_pf()
# ArchOSY().print_stats()
# MDOSY().print_stats()
# # ArchOSY().plot_pf()
# MDOSY().plot_pf()
# MODASCMOP().print_stats()
# MDDASCMOP().print_stats()
# # MODASCMOP().plot_pf()
# MDDASCMOP().plot_pf()
# ConBraninProd().plot()
ConBraninProd().print_stats()
# ConBraninGomez().plot()
ConBraninGomez().print_stats()
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/problems/constrained.py
| 0.823257 | 0.380989 |
constrained.py
|
pypi
|
import numpy as np
import matplotlib.pyplot as plt
from pymoo.core.population import Population
from pymoo.core.variable import Real, Choice, Integer
from sb_arch_opt.problems.continuous import Branin
from sb_arch_opt.problems.problems_base import *
__all__ = ['MDBranin', 'AugmentedMDBranin', 'MDGoldstein', 'MunozZunigaToy', 'Halstrup04']
class MDBranin(Branin):
"""
Mixed-discrete version of the Branin problem that introduces two discrete variables that transform the original
Branin space in different ways.
Implementation based on:
Pelamatti 2020: "Overview and Comparison of Gaussian Process-Based Surrogate Models for Mixed Continuous and
Discrete Variables", section 4.1
"""
_des_vars = [
Real(bounds=(0, 1)), Real(bounds=(0, 1)),
Choice(options=[0, 1]), Choice(options=[0, 1]),
]
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
for i in range(x.shape[0]):
h = self._h(x[i, 0], x[i, 1])
z1, z2 = x[i, 2], x[i, 3]
if z1 == 0:
f_out[i, 0] = h if z2 == 0 else (.4*h + 1.1)
else:
f_out[i, 0] = (-.75*h + 5.2) if z2 == 0 else (-.5*h - 2.1)
def plot(self, z1=0, z2=0, show=True):
xx, yy = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
x = np.column_stack([xx.ravel(), yy.ravel(), np.ones((xx.size,))*z1, np.ones((xx.size,))*z2])
out = Population.new(X=x)
out = self.evaluate(x, out)
ff = out.reshape(xx.shape)
plt.figure(), plt.title('Discrete Branin: $z_1$ = %d, $z_2$ = %d' % (z1, z2))
plt.colorbar(plt.contourf(xx, yy, ff, 50, cmap='viridis'))
plt.xlabel('$x_1$'), plt.ylabel('$x_2$')
plt.xlim([0, 1]), plt.ylim([0, 1])
if show:
plt.show()
class AugmentedMDBranin(MDBranin):
"""
Mixed-discrete version of the Branin function with more continuous input dimensions.
Implementation based on:
Pelamatti 2020: "Overview and Comparison of Gaussian Process-Based Surrogate Models for Mixed Continuous and
Discrete Variables", section 4.2
"""
_des_vars = [Real(bounds=(0, 1)) if i < 10 else Choice(options=[0, 1]) for i in range(12)]
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
for i in range(x.shape[0]):
h = sum([self._h(x[i, j], x[i, j+1]) for j in range(0, 10, 2)])
z1, z2 = x[i, 2], x[i, 3]
if z1 == 0:
f_out[i, 0] = h if z2 == 0 else (.4*h + 1.1)
else:
f_out[i, 0] = (-.75*h + 5.2) if z2 == 0 else (-.5*h - 2.1)
class MDGoldstein(NoHierarchyProblemBase):
"""
Mixed-discrete version of the Goldstein problem that introduces two discrete variables that transform the original
design space in different ways.
Implementation based on:
Pelamatti 2020: "Overview and Comparison of Gaussian Process-Based Surrogate Models for Mixed Continuous and
Discrete Variables", section 4.1
"""
def __init__(self):
des_vars = [
Real(bounds=(0, 100)), Real(bounds=(0, 100)),
Integer(bounds=(0, 2)), Integer(bounds=(0, 2)),
]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
_x3 = [20, 50, 80]
_x4 = [20, 50, 80]
for i in range(x.shape[0]):
x3, x4 = _x3[int(x[i, 2])], _x4[int(x[i, 3])]
f_out[i, 0] = self.h(x[i, 0], x[i, 1], x3, x4)
@staticmethod
def h(x1, x2, x3, x4, z3=4, z4=3):
return sum([
53.3108,
.184901 * x1,
-5.02914 * x1**3 * 1e-6,
7.72522 * x1**z3 * 1e-8,
0.0870775 * x2,
-0.106959 * x3,
7.98772 * x3**z4 * 1e-6,
0.00242482 * x4,
1.32851 * x4**3 * 1e-6,
-0.00146393 * x1 * x2,
-0.00301588 * x1 * x3,
-0.00272291 * x1 * x4,
0.0017004 * x2 * x3,
0.0038428 * x2 * x4,
-0.000198969 * x3 * x4,
1.86025 * x1 * x2 * x3 * 1e-5,
-1.88719 * x1 * x2 * x4 * 1e-6,
2.50923 * x1 * x3 * x4 * 1e-5,
-5.62199 * x2 * x3 * x4 * 1e-5,
])
def plot(self, z1=0, z2=0, show=True):
xx, yy = np.meshgrid(np.linspace(0, 100, 50), np.linspace(0, 100, 50))
x = np.column_stack([xx.ravel(), yy.ravel(), np.ones((xx.size,))*z1, np.ones((xx.size,))*z2])
out = Population.new(X=x)
out = self.evaluate(x, out)
ff = out.reshape(xx.shape)
plt.figure(), plt.title('Discrete Goldstein: $z_1$ = %d, $z_2$ = %d' % (z1, z2))
plt.colorbar(plt.contourf(xx, yy, ff, 50, cmap='viridis'))
plt.xlabel('$x_1$'), plt.ylabel('$x_2$')
plt.xlim([0, 100]), plt.ylim([0, 100])
if show:
plt.show()
class MunozZunigaToy(NoHierarchyProblemBase):
"""
Toy problem from:
Munoz Zuniga 2020: "Global optimization for mixed categorical-continuous variables based on Gaussian process models
with a randomized categorical space exploration step", 10.1080/03155986.2020.1730677
Minimum: -2.329605
"""
def __init__(self):
des_vars = [Real(bounds=(0, 1)), Integer(bounds=(0, 9))]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
f = [
lambda x_: np.cos(3.6 * np.pi * (x_ - 2)) + x_ - 1,
lambda x_: 2 * np.cos(1.1 * np.pi * np.exp(x_)) - .5 * x_ + 2,
lambda x_: np.cos(2 * np.pi * x_) + .5 * x_,
lambda x_: x_ * (np.cos(3.4 * np.pi * (x_ - 1)) - .5 * (x_ - 1)),
lambda x_: -.5 * x_ ** 2,
lambda x_: 2 * np.cos(.25 * np.pi * np.exp(-x_ ** 4)) ** 2 - .5 * x_ + 1,
lambda x_: x_ * np.cos(3.4 * np.pi * x_) - .5 * x_ + 1,
lambda x_: x_ * (-np.cos(7 * .5 * np.pi * x_) - .5 * x_) + 2,
lambda x_: -.5 * x_ ** 5 + 1,
lambda x_: -np.cos(5 * .5 * np.pi * x_) ** 2 * np.sqrt(x_) - .5 * np.log(x_ + .5) - 1.3,
]
for i in range(10):
i_x = x[:, 1] == i
if len(np.where(i_x)[0] > 0):
f_out[i_x, 0] = f[i](x[i_x, 0])
def plot(self, show=True):
x = np.linspace(0, 1, 100)
z = np.array(list(range(10)))
xx, zz = np.meshgrid(x, z)
xx = xx.ravel()
zz = zz.ravel()
out = Population.new(X=np.column_stack([xx, zz]))
out = self.evaluate(out.get('X'), out)
f = out[:]
plt.figure(), plt.title('Munoz-Zuniga Toy Problem')
for i in z:
i_x = zz == i
plt.plot(x, f[i_x], linewidth=1, label='$z = %d$' % (i+1,))
plt.xlim([0, 1]), plt.xlabel('$x$'), plt.ylabel('$f$'), plt.legend()
if show:
plt.show()
class Halstrup04(NoHierarchyProblemBase):
"""
Fourth mixed-discrete test problem from:
Halstrup 2016, "Black-Box Optimization of Mixed Discrete-Continuous Optimization Problems"
Minimum: 1.7025 (https://mixed-optimization-benchmark.github.io/cases/hal04/)
Original report contains an error
"""
f_aux_mod = [
[ # a
[(1., .0), (1., .2)], # d
[(.9, .0), (1., .25)], # e
],
[ # b
[(1., .5), (.8, .0)], # d
[(.5, .0), (1., .8)], # e
],
[ # c
[(1., .9), (.5, .0)], # d
[(1., 1.), (1., 1.25)], # e
],
]
def __init__(self):
des_vars = [
Real(bounds=(0, 1)), Real(bounds=(0, 1)), Real(bounds=(0, 1)), Real(bounds=(0, 1)), Real(bounds=(0, 1)),
Choice(options=[0, 1, 2]), Choice(options=[0, 1]), Choice(options=[0, 1]),
]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
x_ = x[:, :5]
z_ = x[:, 5:].astype(int)
d = 8
x2_term = 2**(np.arange(5)/(d-1))
f_aux = np.sum((5*x_+(1-x_))**2*x2_term, axis=1)-2.75
for i in range(z_.shape[0]):
f_aux_mul, f_aux_add = self.f_aux_mod[z_[i, 0]][z_[i, 1]][z_[i, 2]]
f_out[i, 0] = f_aux[i]*f_aux_mul + f_aux_add
if __name__ == '__main__':
MDBranin().print_stats()
AugmentedMDBranin().print_stats()
MDGoldstein().print_stats()
MunozZunigaToy().print_stats()
Halstrup04().print_stats()
# MDBranin().plot_pf()
MDBranin().plot_design_space()
# AugmentedMDBranin().plot_pf()
# MDGoldstein().plot_pf()
# MunozZunigaToy().plot_pf()
# Halstrup04().plot_pf()
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/problems/discrete.py
| 0.778986 | 0.65159 |
discrete.py
|
pypi
|
import itertools
import numpy as np
from typing import List
from pymoo.core.variable import Integer, Choice
from sb_arch_opt.problems.hierarchical import HierarchyProblemBase
__all__ = ['GNCProblemBase', 'GNCNoActNrType', 'GNCNoActType', 'GNCNoActNr', 'GNCNoAct',
'GNCNoNrType', 'GNCNoType', 'GNCNoNr', 'GNC']
class GNCProblemBase(HierarchyProblemBase):
"""
Guidance, Navigation and Control architecture design problem, from chapter 15 of:
Crawley et al., "System Architecture - Strategy and Product Development for Complex Systems", 2015.
The challenge is to find the most optimal selection and connection patterns from Sensors to Computers,
and Computers to Actuators. The number and type of each element can be selected. The architecture is evaluated in
terms of reliability (more connections and more reliable components lead to more system-level reliability) and
mass (more reliable components are heavier). This is therefore a multi-objective optimization problem.
Component mass and probabilities are taken from:
Apaza & Selva, "Automatic Composition of Encoding Scheme and Search Operators
in System Architecture Optimization", 2021.
"""
_force_get_discrete_rates = False
mass = {
'S': {'A': 3., 'B': 6., 'C': 9.},
'C': {'A': 3., 'B': 5., 'C': 10.},
'A': {'A': 3.5, 'B': 5.5, 'C': 9.5},
}
failure_rate = {
'S': {'A': .00015, 'B': .0001, 'C': .00005},
'C': {'A': .0001, 'B': .00004, 'C': .00002},
'A': {'A': .00008, 'B': .0002, 'C': .0001},
}
def __init__(self, choose_nr=True, n_max=3, choose_type=True, actuators=True):
self.choose_nr = choose_nr
self.n_max = n_max
self.choose_type = choose_type
self.actuators = actuators
# If nr and types are not chosen, there is no way to vary system mass
n_obj = 2 if self.choose_nr or self.choose_type else 1
des_vars = self._get_des_vars()
super().__init__(des_vars, n_obj=n_obj)
def _get_n_valid_discrete(self) -> int:
# Pre-count the number of possible connections, taking into account that each connector at least needs one
# We can ignore any combinations where there are either 1 sources or targets, as there there is only 1
# connection possibility (namely all-to-one or one-to-all)
def _iter_conns(n_src_, n_tgt_):
# Loop over the number of outgoing connections for the current source node (at least one)
for n_conn_src in range(1, n_tgt_+1):
# Loop over the combinations of target node selections
for i_conn_targets in itertools.combinations(list(range(n_tgt_)), n_conn_src):
# Prepare connection matrix of size (n_src x n_tgt), where 1 denotes a made connection
src_conn_matrix = np.zeros((n_src_, n_tgt_), dtype=int)
src_conn_matrix[0, list(i_conn_targets)] = 1
# If we only have 1 source node left, this is the final matrix
if n_src_ == 1:
yield src_conn_matrix
continue
# Otherwise, loop over possible connection matrices by the remaining source nodes
for next_src_conn_matrix in _iter_conns(n_src_-1, n_tgt_):
conn_matrix_ = src_conn_matrix.copy()
conn_matrix_[1:, :] = next_src_conn_matrix
yield conn_matrix_
n_comb_conn = {}
for n_src, n_tgt in itertools.product(list(range(2, self.n_max+1)), list(range(2, self.n_max+1))):
# Loop over connection matrices
n_combinations = 0
for conn_matrix in _iter_conns(n_src, n_tgt):
# Check if all nodes have at least one connection
if np.any(np.sum(conn_matrix, axis=0) == 0) or np.any(np.sum(conn_matrix, axis=1) == 0):
continue
n_combinations += 1
n_comb_conn[n_src, n_tgt] = n_comb_conn[n_tgt, n_src] = n_combinations
# Loop over the number of object instances
n_node_exist = list(range(1, self.n_max+1)) if self.choose_nr else [self.n_max]
n_actuators = n_node_exist if self.actuators else [0]
n_valid = 0
for n_objs in itertools.product(n_node_exist, n_node_exist, n_actuators):
# Count the number of possible type selections
n_inst_comb = 1
if self.choose_type:
for n in n_objs:
if n > 0:
n_inst_comb *= len(list(itertools.combinations_with_replacement('ABC', n)))
# Count the number of possible inter-object connections
for n_src, n_tgt in zip(n_objs[:-1], n_objs[1:]):
# If there are no targets (actuators) to connect to, or there is only 1 of either type skip as there are
# no additional combinations possible
if n_tgt == 0:
continue
if n_src == 1 or n_tgt == 1:
continue
n_inst_comb *= n_comb_conn[n_src, n_tgt]
n_valid += n_inst_comb
return n_valid
def _get_des_vars(self):
des_vars = []
# Choose the nr of sensors, computers, [actuators]
# We simply define one integer design variable per object type to select the nr of instances
n_obj_types = 3 if self.actuators else 2
if self.choose_nr:
for _ in range(n_obj_types):
des_vars.append(Integer(bounds=(1, self.n_max)))
# Choose the type of the objects (A, B, or C)
# Here the thing is that we should select types without including permutations of these selections, otherwise
# the opportunity arises of defining duplicate (in terms of performance) architectures:
# [A] --> [B] is equivalent to [B] -\/-> [B] and [B] --> [C]
# [B] --> [C] [A] -/\-> [C] [A] --> [B]
# therefore, the type selection choices should only represent unordered combinations:
# AAA, AAB, AAC, ABB, ABC, ACC, BBB, BBC, BCC, CCC
# The best way to represent this is by having one categorical design variable per object instance and then
# correcting the design variable values to only represent unordered combinations
if self.choose_type:
for _ in range(n_obj_types):
des_vars += [Choice(options=['A', 'B', 'C']) for _ in range(self.n_max)]
# Choose the connections among objects
# Here we assign each possible connection edge to one categorical design variable (yes/no), representing whether
# the connection is established or not; the constraint that each object should have at least one connection is
# enforced by repair/imputation
for _ in range(n_obj_types):
des_vars += [Choice(options=[False, True]) for _ in range(self.n_max*self.n_max)]
return des_vars
def _is_conditionally_active(self) -> List[bool]:
# If we do not choose the number of objects, all variables are always active
if not self.choose_nr:
return [False]*self.n_var
is_cond_act = [True]*self.n_var
n_obj_types = 3 if self.actuators else 2
i_dv = 0
for _ in range(n_obj_types):
# Choose nr of obj is always active
is_cond_act[i_dv] = False
i_dv += 1
if self.choose_type:
for _ in range(n_obj_types):
for i in range(self.n_max):
# Choose type is active is not choose nr OR for the first instance of each object type
if i == 0:
is_cond_act[i_dv] = False
i_dv += 1
for _ in range(n_obj_types):
for i in range(self.n_max):
for j in range(self.n_max):
# Connections between the first object instances are always active
if i == 0 and j == 0:
is_cond_act[i_dv] = False
i_dv += 1
return is_cond_act
def _correct_x(self, x: np.ndarray, is_active: np.ndarray):
n_obj_types = 3 if self.actuators else 2
n_x_conn = self.n_max*self.n_max
for i, x_i in enumerate(x):
j = 0
# Get the number of instantiated objects
if self.choose_nr:
n_inst = x_i[j:j+n_obj_types].astype(int)
j += n_obj_types
else:
n_inst = np.ones((n_obj_types,), dtype=int)*self.n_max
# Correct the object types
if self.choose_type:
for n_obj in n_inst:
x_obj_type = x_i[j:j+self.n_max]
# Set type selections for non-instantiated objects to inactive
is_active[j+n_obj:j+self.n_max] = False
# Correct types for instantiated objects to only select unordered combinations: subsequent variables
# cannot have a lower value than prior ones
last_type_sel = x_obj_type[0]
for dj in range(1, n_obj):
if x_obj_type[dj] < last_type_sel:
x_i[j+dj] = last_type_sel
else:
last_type_sel = x_obj_type[dj]
j += self.n_max
# Correct the connections
for i_conn in range(n_obj_types-1):
x_conn = x_i[j:j+n_x_conn].reshape((self.n_max, self.n_max))
is_active_conn = x_i[j:j+n_x_conn].reshape((self.n_max, self.n_max))
# Deactivate connections for non-instantiated objects
n_src, n_tgt = n_inst[i_conn], n_inst[i_conn+1]
is_active_conn[n_src:, :] = False
is_active_conn[:, n_tgt:] = False
# Ensure that each connector has at least one connection
x_conn_active = x_conn[:n_src, :n_tgt]
for i_src, n_conn_src in enumerate(np.sum(x_conn_active, axis=1)):
if n_conn_src == 0:
# Select the same target as the source to make a connection, or the last available
i_tgt = min(i_src, n_tgt-1)
x_conn_active[i_src, i_tgt] = 1
for i_tgt, n_conn_tgt in enumerate(np.sum(x_conn_active, axis=0)):
if n_conn_tgt == 0:
i_src = min(i_tgt, n_src-1)
x_conn_active[i_src, i_tgt] = 1
j += n_x_conn
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
# First correct the design variable so that only valid architectures are evaluated
self._correct_x_impute(x, is_active_out)
# Get object-type labels
n_obj_types = 3 if self.actuators else 2
j = n_obj_types if self.choose_nr else 0
obj_type_cat_values = [[self.get_categorical_values(x, j+i_obj*self.n_max+dj) for dj in range(self.n_max)]
for i_obj in range(n_obj_types)]
# Loop over architectures
n_x_conn = self.n_max*self.n_max
for i, x_i in enumerate(x):
j = 0
# Get the number of instantiated objects
if self.choose_nr:
n_inst = x_i[j:j+n_obj_types].astype(int)
j += n_obj_types
else:
n_inst = np.ones((n_obj_types,), dtype=int)*self.n_max
# Get the object types
obj_types = []
if self.choose_type:
for i_obj, n_obj in enumerate(n_inst):
obj_types.append([obj_type_cat_values[i_obj][dj][i] for dj in range(n_obj)])
j += self.n_max
else:
types = ['A', 'B', 'C']
for n in n_inst:
type_cycle = itertools.cycle(types)
obj_types.append([next(type_cycle) for _ in range(n)])
# Get the connections
conn_edges = []
for i_conn in range(n_obj_types-1):
x_conn = x_i[j:j+n_x_conn].reshape((self.n_max, self.n_max)).astype(bool)
edges = []
n_src, n_tgt = n_inst[i_conn], n_inst[i_conn+1]
for i_src in range(n_src):
for i_tgt in range(n_tgt):
if x_conn[i_src, i_tgt]:
edges.append((i_src, i_tgt))
conn_edges.append(edges)
j += n_x_conn
# Calculate metrics
mass = self._calc_mass(obj_types[0], obj_types[1], actuator_types=obj_types[2] if self.actuators else None)
failure_rate = self._calc_failure_rate(obj_types[0], obj_types[1], conn_edges[0],
actuator_types=obj_types[2] if self.actuators else None,
act_conns=conn_edges[1] if self.actuators else None)
f_out[i, 0] = failure_rate
if f_out.shape[1] > 1:
f_out[i, 1] = mass
@classmethod
def _calc_mass(cls, sensor_types, computer_types, actuator_types=None):
mass = sum([cls.mass['S'][type_] for type_ in sensor_types])
mass += sum([cls.mass['C'][type_] for type_ in computer_types])
if actuator_types is not None:
mass += sum([cls.mass['A'][type_] for type_ in actuator_types])
return mass
@classmethod
def _calc_failure_rate(cls, sensor_types, computer_types, conns, actuator_types=None, act_conns=None):
# Get item failure rates
rate = cls.failure_rate
failure_rates = [np.array([rate['S'][type_] for type_ in sensor_types]),
np.array([rate['C'][type_] for type_ in computer_types])]
obj_conns = [conns]
if actuator_types is not None:
failure_rates.append(np.array([rate['A'][type_] for type_ in actuator_types]))
obj_conns.append(act_conns)
conn_matrices = []
for i, edges in enumerate(obj_conns):
matrix = np.zeros((len(failure_rates[i]), len(failure_rates[i+1])), dtype=int)
for i_src, i_tgt in edges:
matrix[i_src, i_tgt] = 1
conn_matrices.append(matrix)
# Loop over combinations of failed components
def _branch_failures(i_rates=0, src_connected_mask=None) -> float:
calc_downstream = i_rates < len(conn_matrices)-1
rates, tgt_rates = failure_rates[i_rates], failure_rates[i_rates+1]
conn_mat = conn_matrices[i_rates]
# Loop over failure scenarios
if src_connected_mask is None:
src_connected_mask = np.ones((len(rates),), dtype=bool)
total_rate = 0.
for ok_sources in itertools.product(*[([False, True] if src_connected_mask[i_conn] else [False])
for i_conn in range(len(rates))]):
if i_rates > 0 and not any(ok_sources):
continue
# Calculate probability of this scenario occurring
ok_sources = list(ok_sources)
occurrence_prob = rates.copy()
occurrence_prob[ok_sources] = 1-occurrence_prob[ok_sources]
prob = 1.
for partial_prob in occurrence_prob[src_connected_mask]:
prob *= partial_prob
occurrence_prob = prob
# Check which targets are still connected in this scenario
conn_mat_ok = conn_mat[ok_sources, :].T
connected_targets = np.zeros((conn_mat_ok.shape[0],), dtype=bool)
for i_conn_tgt in range(conn_mat_ok.shape[0]):
connected_targets[i_conn_tgt] = np.any(conn_mat_ok[i_conn_tgt])
# If no connected targets are available the system fails
tgt_failure_rates = tgt_rates[connected_targets]
if len(tgt_failure_rates) == 0:
total_rate += occurrence_prob
continue
# Calculate the probability that the system fails because all remaining connected targets fail
all_tgt_fail_prob = 1.
for prob in tgt_failure_rates:
all_tgt_fail_prob *= prob
total_rate += occurrence_prob*all_tgt_fail_prob
# Calculate the probability that the system fails because remaining downstream connected targets fail
if calc_downstream:
total_rate += occurrence_prob*_branch_failures(
i_rates=i_rates+1, src_connected_mask=connected_targets)
return total_rate
failure_rate = _branch_failures()
return np.log10(failure_rate)
def __repr__(self):
return f'{self.__class__.__name__}(choose_nr={self.choose_nr}, n_max={self.n_max}, ' \
f'choose_type={self.choose_type}, actuators={self.actuators})'
class GNCNoActNrType(GNCProblemBase):
def __init__(self):
super().__init__(choose_type=False, choose_nr=False, actuators=False)
def __repr__(self):
return f'{self.__class__.__name__}()'
class GNCNoActType(GNCProblemBase):
def __init__(self):
super().__init__(choose_type=False, actuators=False)
def __repr__(self):
return f'{self.__class__.__name__}()'
class GNCNoActNr(GNCProblemBase):
def __init__(self):
super().__init__(choose_nr=False, actuators=False)
def __repr__(self):
return f'{self.__class__.__name__}()'
class GNCNoAct(GNCProblemBase):
def __init__(self):
super().__init__(actuators=False)
def __repr__(self):
return f'{self.__class__.__name__}()'
class GNCNoNrType(GNCProblemBase):
def __init__(self):
super().__init__(choose_type=False, choose_nr=False)
def __repr__(self):
return f'{self.__class__.__name__}()'
class GNCNoType(GNCProblemBase):
def __init__(self):
super().__init__(choose_type=False)
def __repr__(self):
return f'{self.__class__.__name__}()'
class GNCNoNr(GNCProblemBase):
def __init__(self):
super().__init__(choose_nr=False)
def __repr__(self):
return f'{self.__class__.__name__}()'
class GNC(GNCProblemBase):
def __init__(self):
super().__init__()
def __repr__(self):
return f'{self.__class__.__name__}()'
if __name__ == '__main__':
# GNCProblemBase().print_stats()
# GNCProblemBase().plot_pf()
GNCNoActNrType().print_stats()
GNCNoActType().print_stats()
GNCNoActNr().print_stats()
GNCNoAct().print_stats()
GNCNoNrType().print_stats()
GNCNoType().print_stats()
GNCNoNr().print_stats()
GNC().print_stats()
# GNCNoAct().plot_pf()
GNC().plot_pf()
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/problems/gnc.py
| 0.847779 | 0.507507 |
gnc.py
|
pypi
|
import numpy as np
from deprecated import deprecated
from scipy.spatial import distance
from pymoo.core.variable import Real
from sb_arch_opt.problems.constrained import *
from sb_arch_opt.problems.hierarchical import *
from sb_arch_opt.problems.problems_base import *
from sb_arch_opt.problems.continuous import Branin, Rosenbrock
from sb_arch_opt.problem import ArchOptProblemBase
from sb_arch_opt.sampling import HierarchicalSampling
from pymoo.problems.single.ackley import Ackley
__all__ = ['SampledFailureRateMixin', 'Mueller01', 'Mueller02', 'Mueller08', 'MOMueller08', 'Alimo', 'HCBranin',
'MOHierarchicalRosenbrockHC', 'HCMOHierarchicalTestProblem', 'RandomHiddenConstraintsBase', 'HCSphere',
'HierarchicalRosenbrockHC', 'ConstraintHiderMetaProblem', 'CantileveredBeamHC', 'MDCantileveredBeamHC',
'CarsideHC', 'MDCarsideHC', 'CarsideHCLess', 'MDMueller02', 'MDMueller08', 'MDMOMueller08',
'HierMueller02', 'HierMueller08', 'MOHierMueller08', 'AlimoEdge', 'HierAlimo', 'HierAlimoEdge',
'Tfaily01', 'Tfaily02', 'Tfaily03', 'Tfaily04']
class SampledFailureRateMixin(ArchOptProblemBase):
"""Mixin to determine the failure rate by monte-carlo sampling"""
n_samples_failure_rate = 10000
def get_failure_rate(self) -> float:
x = HierarchicalSampling().do(self, self.n_samples_failure_rate).get('X')
out = self.evaluate(x, return_as_dictionary=True)
is_failed = self.get_failed_points(out)
return np.sum(is_failed)/len(is_failed)
def might_have_hidden_constraints(self):
return True
class Mueller01(SampledFailureRateMixin, NoHierarchyProblemBase):
"""
Test problem 1 (several disconnected failure regions) of:
https://pubsonline.informs.org/doi/suppl/10.1287/ijoc.2018.0864/suppl_file/ijoc.2018.0864.sm1.pdf
Citation: Mueller, J., Day, M. "Surrogate Optimization of Computationally Expensive Black-Box Problems with Hidden
Constraints", 2019, DOI: https://doi.org/10.1287/ijoc.2018.0864
"""
def __init__(self, n_var=5):
self._ackley = a = Ackley(n_var=n_var)
des_vars = [Real(bounds=(-10, 10)) for _ in range(a.n_var)]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
f_out[:, :] = self._ackley.evaluate(x, return_as_dictionary=True)['F']
def _is_failed(ix):
for i_dv in range(x.shape[1]):
if -.2 <= x[ix, i_dv] <= .2:
return True
cx = np.sum(x[ix, :]*(np.sin(x[ix, :]) + .1))
return cx > 0
for i in range(x.shape[0]):
if _is_failed(i):
f_out[i, :] = np.nan
class Mueller02(SampledFailureRateMixin, NoHierarchyProblemBase):
"""
Test problem 2 (one failure region) of:
https://pubsonline.informs.org/doi/suppl/10.1287/ijoc.2018.0864/suppl_file/ijoc.2018.0864.sm1.pdf
Citation: Mueller, J., Day, M. "Surrogate Optimization of Computationally Expensive Black-Box Problems with Hidden
Constraints", 2019, DOI: https://doi.org/10.1287/ijoc.2018.0864
"""
def __init__(self):
des_vars = [Real(bounds=(-3*np.pi, 3*np.pi)) for _ in range(4)]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
f_out[:, 0] = (x[:, 0] - x[:, 1])**2 + \
np.exp((1 - np.sin(x[:, 0]))**2) * np.cos(x[:, 1]) + \
np.exp((1 - np.cos(x[:, 1]))**2) * np.sin(x[:, 0])
x_min_term = np.sqrt(np.abs(x[:, [0]] - x + 1))
x_plus_term = np.sqrt(np.abs(x[:, [0]] + x + 1))
cx = np.sum((x * np.sin(x_min_term) * np.cos(x_plus_term)) +
((x[:, [0]] + 1) * np.sin(x_plus_term) * np.cos(x_min_term)), axis=1) - 5
f_out[cx > 0, :] = np.nan
class MDMueller02(SampledFailureRateMixin, MixedDiscretizerProblemBase):
"""Mixed-discrete version of the Mueller 2 problem"""
def __init__(self):
super().__init__(Mueller02(), n_opts=6, n_vars_int=2)
class HierMueller02(SampledFailureRateMixin, TunableHierarchicalMetaProblem):
"""Hierarchical Mueller 2 problem"""
def __init__(self):
super().__init__(lambda n: Mueller02(), imp_ratio=6., n_subproblem=20, diversity_range=.5)
class Mueller08(SampledFailureRateMixin, NoHierarchyProblemBase):
"""
Test problem 8 (one failure region) of:
https://pubsonline.informs.org/doi/suppl/10.1287/ijoc.2018.0864/suppl_file/ijoc.2018.0864.sm1.pdf
Equation (8c) is modified a bit to make the non-failed region a bit larger.
Citation: Mueller, J., Day, M. "Surrogate Optimization of Computationally Expensive Black-Box Problems with Hidden
Constraints", 2019, DOI: https://doi.org/10.1287/ijoc.2018.0864
"""
_mo = False
def __init__(self, n_var=10):
des_vars = [Real(bounds=(-10, 10)) for _ in range(n_var)]
super().__init__(des_vars, n_obj=2 if self._mo else 1)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
inner_sum = [np.sum(j*np.sin((j+1)*x) + j, axis=1) for j in range(1, 6)]
f_out[:, 0] = np.sum(np.column_stack(inner_sum), axis=1)
if self._mo:
inner_sum = [np.sum(j*np.cos((j+1)*x[:, :-1]) + j, axis=1) for j in range(1, 6)]
f_out[:, 1] = np.sum(np.column_stack(inner_sum), axis=1)
f_out[:, 1] -= .2*f_out[:, 0]
cx = np.sum(x**4 - 16*x**2 + 5*x, axis=1) - 1000*self.n_var
f_out[cx > 0, :] = np.nan
class MOMueller08(Mueller08):
"""Multi-objective version of the Mueller 8 test problem"""
_mo = True
class MDMueller08(SampledFailureRateMixin, MixedDiscretizerProblemBase):
"""Mixed-discrete version of the Mueller 8 problem"""
def __init__(self):
super().__init__(Mueller08(), n_opts=10, n_vars_int=2)
class MDMOMueller08(SampledFailureRateMixin, MixedDiscretizerProblemBase):
"""Mixed-discrete, multi-objective version of the Mueller 8 problem"""
def __init__(self):
super().__init__(MOMueller08(), n_opts=10, n_vars_int=2)
class HierMueller08(SampledFailureRateMixin, TunableHierarchicalMetaProblem):
"""Hierarchical Mueller 8 problem"""
def __init__(self):
super().__init__(lambda n: Mueller08(), imp_ratio=6., n_subproblem=20, diversity_range=.5)
class MOHierMueller08(SampledFailureRateMixin, TunableHierarchicalMetaProblem):
"""Multi-objective hierarchical Mueller 8 problem"""
def __init__(self):
super().__init__(lambda n: MOMueller08(), imp_ratio=6., n_subproblem=20, diversity_range=.5)
class Alimo(SampledFailureRateMixin, Branin):
"""
Modified test problem used by:
Alimo et al. "Delaunay-based global optimization in nonconvex domains defined by hidden constraints", 2018,
DOI: 10.1007/978-3-319-89890-2_17
The underlying problem is replaced by the Branin function.
"""
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
# x0 = [.19, .29] # In the paper, no other reference point is given
# f_out[:, 0] = np.sum(np.abs(x - x0)**2, axis=1) - .024*self.n_var
super()._arch_evaluate(x, is_active_out, f_out, g_out, h_out, *args, **kwargs)
# The term of -.25 is added
x_fail = (x-self.xl)/(self.xu-self.xl)
self._mod_x_fail(x_fail)
cx = (self.n_var/12) + .1*np.sum(4*(x_fail-.7)**2 - 2*np.cos(4*np.pi*(x_fail-.7)), axis=1) - .25
f_out[cx >= 0, :] = np.nan
def _mod_x_fail(self, x_fail):
x_fail[:, 1] = 1-x_fail[:, 1]
x_fail[:, 0] += .15
class AlimoEdge(Alimo):
"""
Modified Alimo/Branin problem where the optimum points lie at the edge of the failed region.
"""
def _mod_x_fail(self, x_fail):
x_fail[:, 0] -= .05
x_fail[:, 1] += .05
class HierAlimo(SampledFailureRateMixin, TunableHierarchicalMetaProblem):
"""Hierarchical Alimo problem"""
def __init__(self):
super().__init__(lambda n: Alimo(), imp_ratio=6., n_subproblem=20, diversity_range=.5)
class HierAlimoEdge(SampledFailureRateMixin, TunableHierarchicalMetaProblem):
"""Hierarchical AlimoEdge problem"""
def __init__(self):
super().__init__(lambda n: AlimoEdge(), imp_ratio=6., n_subproblem=20, diversity_range=.5)
class HCBranin(SampledFailureRateMixin, Branin):
"""
Modified Branin problem with infeasibility disk, as used in:
Gelbart et al., "Bayesian optimization with unknown constraints", arXiv preprint arXiv:1403.5607 (2014).
"""
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
super()._arch_evaluate(x, is_active_out, f_out, g_out, h_out, *args, **kwargs)
# The original function is scaled differently
c = (x[:, 0]-.5)**2 + (x[:, 1]-.5)**2
f_out[c > .22, :] = np.nan
class RandomHiddenConstraintsBase(SampledFailureRateMixin, NoHierarchyProblemBase):
"""
Base class for randomly adding failed regions to some design space.
Inspired by:
Sacher et al., "A classification approach to efficient global optimization in presence of non-computable domains",
2018, DOI: 10.1007/s00158-018-1981-8
"""
def __init__(self, des_vars, density=.25, radius=.1, seed=None, **kwargs):
self.density = density
self.radius = radius
self.seed = seed
super().__init__(des_vars, **kwargs)
self._x_failed = None
self._scale = None
def _set_failed_points(self, x: np.ndarray, f_out: np.ndarray, g_out: np.ndarray, h_out: np.ndarray):
if self._x_failed is None:
if self.seed is not None:
np.random.seed(self.seed)
x_failed = HierarchicalSampling().do(self, 100).get('X')
i_selected = np.random.choice(len(x_failed), size=int(self.density*len(x_failed)), replace=False)
self._scale = 1/(self.xu-self.xl)
self._x_failed = x_failed[i_selected, :]*self._scale
is_failed = np.any(distance.cdist(x*self._scale, self._x_failed) < self.radius, axis=1)
f_out[is_failed, :] = np.nan
g_out[is_failed, :] = np.nan
h_out[is_failed, :] = np.nan
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
"""At the end of the function, use `_set_failed_points`!"""
raise NotImplementedError
class HCSphere(RandomHiddenConstraintsBase):
"""Sphere with randomly-added hidden constraints"""
_n_vars = 2
_density = .25
def __init__(self):
des_vars = [Real(bounds=(0, 1)) for _ in range(self._n_vars)]
super().__init__(des_vars, density=self._density, radius=.1, seed=0)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
f_out[:, 0] = np.sum((x - .5*(self.xu-self.xl))**2, axis=1)
self._set_failed_points(x, f_out, g_out, h_out)
class MOHierarchicalRosenbrockHC(SampledFailureRateMixin, MOHierarchicalRosenbrock):
"""
Adaptation of the multi-objective hierarchical Rosenbrock problem, that sets points with a large constraint
violation to NaN, simulating hidden constraints.
"""
def __init__(self):
super().__init__()
self.n_ieq_constr -= 1
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
self._correct_x_impute(x, is_active_out)
g = np.empty((x.shape[0], 2))
self._eval_f_g(x, f_out, g)
hc_violated = g[:, 1] > 0.
if self._mo:
hc_violated |= np.abs(.5 - (f_out[:, 0] / 20) % 1) > .35
hc_violated |= (f_out[:, 1] > 1000) & (np.abs(.5 - (f_out[:, 1] / 100) % 1) > .35)
f_out[hc_violated] = np.nan
g[hc_violated] = np.nan
g_out[:, 0] = g[:, 0]
class HierarchicalRosenbrockHC(MOHierarchicalRosenbrockHC):
"""Single-objective hierarchical hidden-constraints Rosenbrock problem"""
_mo = False
@deprecated(reason='Not realistic (see HierarchicalMetaProblemBase docstring)')
class HCMOHierarchicalTestProblem(SampledFailureRateMixin, HierarchicalMetaProblemBase):
"""
Multi-objective hierarchical test problem with hidden constraints:
- Only approximately 42% of design variables are active in a DOE
- Approximately 60% of solutions do not converge in a DOE (i.e. return nan --> hidden constraint)
"""
def __init__(self):
super().__init__(MOHierarchicalRosenbrockHC(), n_rep=2, n_maps=2, f_par_range=[100, 100])
def __repr__(self):
return f'{self.__class__.__name__}()'
class ConstraintHiderMetaProblem(SampledFailureRateMixin, ArchOptTestProblemBase):
"""Meta problem that turns one or more constraints of an underlying problem into hidden constraints"""
def __init__(self, problem: ArchOptTestProblemBase, i_g_hc):
self._problem = problem
self._i_g_hc = i_g_hc = np.array(i_g_hc)
n_constr = problem.n_ieq_constr
if not np.all(i_g_hc < n_constr):
raise RuntimeError(f'Unavailable constraints: {i_g_hc}')
n_constr -= len(i_g_hc)
super().__init__(problem.design_space, n_obj=problem.n_obj, n_ieq_constr=n_constr,
n_eq_constr=problem.n_eq_constr)
def _get_n_valid_discrete(self) -> int:
return self._problem.get_n_valid_discrete()
def _gen_all_discrete_x(self):
return self._problem.all_discrete_x
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
self._correct_x_impute(x, is_active_out)
out = self._problem.evaluate(x, return_as_dictionary=True)
f_out[:, :] = out['F']
if 'H' in out:
h_out[:, :] = out['H']
g = out['G']
g_out[:, :] = np.delete(g, self._i_g_hc, axis=1)
is_failed = np.any(g[:, self._i_g_hc] < 0, axis=1)
f_out[is_failed, :] = np.nan
g_out[is_failed, :] = np.nan
def _correct_x(self, x: np.ndarray, is_active: np.ndarray):
x[:, :], is_active[:, :] = self._problem.correct_x(x)
def __repr__(self):
return f'{self.__class__.__name__}()'
class CantileveredBeamHC(ConstraintHiderMetaProblem):
def __init__(self):
super().__init__(ArchCantileveredBeam(), i_g_hc=[1])
class MDCantileveredBeamHC(ConstraintHiderMetaProblem):
def __init__(self):
super().__init__(MDCantileveredBeam(), i_g_hc=[1])
class CarsideHC(ConstraintHiderMetaProblem):
def __init__(self):
super().__init__(ArchCarside(), i_g_hc=[3, 7])
class CarsideHCLess(ConstraintHiderMetaProblem):
def __init__(self):
super().__init__(ArchCarside(), i_g_hc=[6])
class MDCarsideHC(ConstraintHiderMetaProblem):
def __init__(self):
super().__init__(MDCarside(), i_g_hc=[3, 7])
class Tfaily01(SampledFailureRateMixin, NoHierarchyProblemBase):
"""
Test problem 1 from:
Tfaily et al., "Efficient Acquisition Functions for Bayesian Optimization in the Presence of Hidden Constraints",
AIAA Aviation 2023 Forum
"""
def __init__(self):
des_vars = [
Real(bounds=(-2, 2)),
Real(bounds=(-2, 2)),
]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
def w(z):
return np.exp(-(z-1)**2) + np.exp(-.8*(z+1)**2) - .5*np.sin(8*(z+.1))
f_out[:, 0] = -w(x[:, 0])*w(x[:, 1])
# Add the 45-deg rotated ellipse as failure region
alpha = .25*np.pi
xx_ = np.cos(alpha)*x[:, 0] + np.sin(alpha)*x[:, 1]
yy_ = np.sin(alpha)*x[:, 0] - np.cos(alpha)*x[:, 1]
is_failed = (xx_/2)**2 + yy_**2 < 1
f_out[is_failed, :] = np.nan
class Tfaily02(SampledFailureRateMixin, Branin):
"""
Test problem 2 from:
Tfaily et al., "Efficient Acquisition Functions for Bayesian Optimization in the Presence of Hidden Constraints",
AIAA Aviation 2023 Forum
"""
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
super()._arch_evaluate(x, is_active_out, f_out, g_out, h_out, *args, **kwargs)
is_failed = (np.abs(x[:, 0] - .5) < .5) & (np.abs(x[:, 1] - .5) < .4)
f_out[is_failed, :] = np.nan
class Tfaily03(SampledFailureRateMixin, Rosenbrock):
"""
Test problem 3 from:
Tfaily et al., "Efficient Acquisition Functions for Bayesian Optimization in the Presence of Hidden Constraints",
AIAA Aviation 2023 Forum
"""
def __init__(self):
super().__init__(n_var=4)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
super()._arch_evaluate(x, is_active_out, f_out, g_out, h_out, *args, **kwargs)
is_failed = np.zeros(x.shape, dtype=bool)
is_failed[:, :2] = (0 < x[:, :2]) & (x[:, :2] < 1)
is_failed[:, 2:] = (1 < x[:, 2:]) & (x[:, 2:] < 2)
is_failed = np.any(is_failed, axis=1)
f_out[is_failed, :] = np.nan
class Tfaily04(SampledFailureRateMixin, NoHierarchyProblemBase):
"""
Test problem 4 from:
Tfaily et al., "Efficient Acquisition Functions for Bayesian Optimization in the Presence of Hidden Constraints",
AIAA Aviation 2023 Forum
"""
def __init__(self):
des_vars = [Real(bounds=(-500, 500)) for _ in range(6)]
super().__init__(des_vars)
def _arch_evaluate(self, x: np.ndarray, is_active_out: np.ndarray, f_out: np.ndarray, g_out: np.ndarray,
h_out: np.ndarray, *args, **kwargs):
f_out[:, 0] = 2513.895 - np.sum(x*np.sin(np.sqrt(np.abs(x))), axis=1)
is_failed = np.any((350 < x) & (x < 420), axis=1)
f_out[is_failed, :] = np.nan
if __name__ == '__main__':
# MOHierarchicalRosenbrockHC().print_stats()
# HierarchicalRosenbrockHC().print_stats()
# MOHierarchicalRosenbrockHC().plot_pf()
# HCMOHierarchicalTestProblem().print_stats()
# HCMOHierarchicalTestProblem().plot_pf()
# Mueller01().print_stats()
# Mueller01().plot_design_space(x_base=[-.5]*5)
# Mueller01().plot_pf()
# Mueller02().print_stats()
# Mueller02().plot_design_space()
# Mueller02().plot_pf()
# Mueller08().print_stats()
# Mueller08().plot_pf()
# Mueller08().plot_design_space()
# MOMueller08().print_stats()
# MOMueller08().plot_pf()
# MDMueller02().print_stats()
# MDMueller02().plot_pf()
# MDMueller02().plot_pf()
# MDMueller08().print_stats()
# MDMOMueller08().print_stats()
# MDMOMueller08().plot_pf()
# HierMueller02().print_stats()
# HierMueller08().print_stats()
# MOHierMueller08().print_stats()
# Alimo().print_stats()
# AlimoEdge().print_stats()
# Alimo().plot_design_space()
# AlimoEdge().plot_design_space()
# HierAlimo().print_stats()
# HierAlimoEdge().print_stats()
# HCBranin().print_stats()
# HCBranin().plot_design_space()
# HCSphere().print_stats()
# HCSphere().plot_design_space()
# CantileveredBeamHC().print_stats()
# MDCantileveredBeamHC().print_stats()
# CarsideHC().print_stats()
# CarsideHCLess().print_stats()
# MDCarsideHC().print_stats()
Tfaily01().print_stats()
Tfaily02().print_stats()
Tfaily03().print_stats()
Tfaily04().print_stats()
# Tfaily04().plot_design_space()
|
/sb_arch_opt-1.1.5-py3-none-any.whl/sb_arch_opt/problems/hidden_constraints.py
| 0.783823 | 0.336004 |
hidden_constraints.py
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sb_distributions-0.1.tar.gz/sb_distributions-0.1/sb_distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
from datetime import datetime
import json
import requests
from finance.models import Granularity
from finance.providers.provider import AssetValueProvider
class Yahoo(AssetValueProvider):
"""Fetches and parses financial data from Yahoo Finance."""
name = 'yahoo'
def __init__(self):
pass
def get_url(self, symbol):
"""Returns a URL to be fetched.
:param symbol: A symbol of a security (e.g., NVDA, MSFT)
"""
return 'https://query1.finance.yahoo.com/v8/finance/chart/{0}'
def as_timestamp(self, datetime):
return int(datetime.timestamp())
def asset_values(self, symbol, start_time, end_time,
granularity=Granularity.day):
mappings = {
Granularity.day: self.fetch_daily_data,
Granularity.min: self.fetch_data_by_minutes,
}
try:
fetcher = mappings[granularity]
except KeyError:
raise NotImplementedError
else:
rows = fetcher(symbol, start_time, end_time)
return self.filter_empty_rows(rows)
# NOTE: 'Data by day' would keep the name consistent, but 'daily data'
# sounds more natural.
def fetch_daily_data(self, symbol, start_time, end_time):
url = self.get_url(symbol)
params = {
'symbol': symbol,
'period1': self.as_timestamp(start_time),
'period2': self.as_timestamp(end_time),
'interval': '1d',
'includePrePost': 'true',
'events': 'div%7Csplit%7Cearn',
'corsDomain': 'finance.yahoo.com',
}
resp = requests.get(url, params=params)
rows = self.parse_chart_data(resp.text)
return rows
def fetch_data_by_minutes(self, symbol, start_time, end_time):
url = self.get_url(symbol)
params = {
'symbol': symbol,
'period1': self.as_timestamp(start_time),
'period2': self.as_timestamp(end_time),
'interval': '1m',
'includePrePost': 'true',
'events': 'div%7Csplit%7Cearn',
'corsDomain': 'finance.yahoo.com',
}
resp = requests.get(url, params=params)
rows = self.parse_chart_data(resp.text)
return rows
def parse_chart_data(self, raw_json):
"""Parses Yahoo Finance chart data.
See some examples if necessary:
- sample-data/yahoo_finance_msft_1m.json
- sample-data/yahoo_finance_nvda_1d.json
In case of error, the response will look something like the following:
{'chart': {
'result': None,
'error': {
'code': 'Not Found',
'description': 'No data found, symbol may be delisted'}
}
}
"""
parsed = json.loads(raw_json)
error = parsed['chart']['error']
if error:
raise ValueError(error['description'])
timestamps = parsed['chart']['result'][0]['timestamp']
timestamps = [datetime.fromtimestamp(int(t)) for t in timestamps]
quote = parsed['chart']['result'][0]['indicators']['quote'][0]
keys = ['open', 'high', 'low', 'close', 'volume']
cols = [timestamps] + [quote[k] for k in keys]
# Transposition from column-wise data to row-wise data
return zip(*cols)
def filter_empty_rows(self, rows):
for row in rows:
if all([c is not None for c in row]):
yield row
|
/sb-finance-0.4.1.tar.gz/sb-finance-0.4.1/finance/providers/yahoo.py
| 0.800146 | 0.408159 |
yahoo.py
|
pypi
|
import codecs
from datetime import timedelta
from finance.providers.provider import Provider
from finance.providers.record import DateTime, Decimal, Integer, List, String
DATE_INPUT_FORMAT = '%Y/%m/%d'
DATE_OUTPUT_FORMAT = '%Y-%m-%d'
# NOTE: This doesn't seem like a good idea...
name_code_mappings = {
'애플': 'AAPL',
'AMD': 'AMD',
'Advanced Micro Devic': 'AMD',
'Advanced Micro Devices Inc.': 'AMD',
'아마존닷컴': 'AMZN',
'아마존 닷컴': 'AMZN',
'ARK Web x.0 ETF': 'ARKW',
'Berkshire Hathaway I': 'BRK-B', # FIXME: This is a dangerous assumption (could've been BRK-A)
'버크셔해서웨이.B': 'BRK-B',
'보잉': 'BA',
'Credit Suisse High Y': 'DHY',
'CREDIT SUISSE HIGH YIELD BOND FU': 'DHY',
'Empire State Realty Trust Inc.': 'ESRT',
'Empire State Realty': 'ESRT',
'EMPIRE ST RLTY TR INC': 'ESRT',
'Direxion Daily Gold': 'NUGT',
'엔비디아': 'NVDA',
'OXFORD LANE CAPITAL': 'OXLC',
'옥스포드 래인 캐피탈': 'OXLC',
'스타벅스': 'SBUX',
'SPDR S&P 500': 'SPY',
'테슬라 모터스': 'TSLA',
'VANGUARD TAX-EXEMPT BOND ETF': 'VTEB',
'ISHARES 20+Y TREASURY BOND ETF': 'TLT',
'ISHARES IBOXX $ INVESTMENT GRADE': 'LQD',
'VANGUARD EMERGING MARKETS GOVERN': 'VWOB',
'VANGUARD SHORT-TERM INFLATION-PR': 'VTIP',
'넥슨 일본': '3659.T',
'삼성전자보통주': '005930.KS',
}
class Miraeasset(Provider):
DEFAULT_ENCODING = 'euc-kr'
def read_records(self, filename):
with codecs.open(filename, 'r', encoding=self.DEFAULT_ENCODING) as fin:
for record in self.parse_records(fin):
yield record
def find_header_column_indices(self, headers):
mappings = [
('created_at', '거래일자'),
('seq', '거래번호'),
('category', '거래종류'),
('amount', '거래금액'),
('currency', '통화코드'),
('name', '종목명'),
('unit_price', '단가'),
('quantity', '수량'),
('fees', '수수료'),
('tax', '제세금합'),
]
return {k: headers.index(v) for k, v in mappings}
# FIXME: This doesn't have to be a method
def coalesce(self, value, fallback):
return value if value else fallback
def parse_records(self, fin):
"""거래내역조회 (0650)"""
headers = next(fin).strip().split(',')
col_count = len(headers)
assert col_count == 25, 'Invalid column count ({})'.format(col_count)
column_indices = self.find_header_column_indices(headers)
for line in fin:
columns = [x.strip() for x in line.strip().split(',')]
assert len(columns) == col_count, \
'Invalid column count ({})'.format(len(columns))
column_names = [
'created_at', 'seq', 'category', 'amount', 'currency',
# 'code',
'name', 'unit_price', 'quantity', 'fees', 'tax',
]
kwargs = {k: columns[column_indices[k]] for k in column_names}
# FIXME: Fix all this shit
kwargs['amount'] = self.coalesce(kwargs['amount'], 0)
kwargs['unit_price'] = self.coalesce(kwargs['unit_price'], 0)
kwargs['quantity'] = self.coalesce(kwargs['quantity'], 0)
kwargs['fees'] = self.coalesce(kwargs['fees'], 0)
kwargs['tax'] = self.coalesce(kwargs['tax'], 0)
try:
kwargs['code'] = name_code_mappings[kwargs['name']]
except KeyError:
kwargs['code'] = '(unknown)'
kwargs['raw_columns'] = columns
yield Record(**kwargs)
class Record(object):
"""Represents a single transaction record."""
attributes = ['created_at', 'seq', 'category', 'amount', 'currency',
'code', 'name', 'unit_price', 'quantity', 'fees', 'tax',
'raw_columns']
created_at = DateTime(date_format=DATE_INPUT_FORMAT)
seq = Integer()
category = String()
amount = Decimal()
currency = String()
#: ISIN (International Securities Identification Numbers)
code = String()
name = String()
unit_price = Decimal()
quantity = Integer()
fees = Decimal()
tax = Decimal()
raw_columns = List()
def __init__(self, created_at, seq, category, amount, currency, code,
name, unit_price, quantity, fees, tax, raw_columns):
self.created_at = created_at
self.seq = seq
self.category = category
self.amount = amount
self.currency = currency
self.code = code
self.name = name
self.unit_price = unit_price
self.quantity = quantity
self.fees = fees
self.tax = tax
self.raw_columns = raw_columns
def __repr__(self):
return 'miraeasset.Record({}, {}, {}, {} ({}), {}, {})'.format(
self.created_at.strftime(DATE_OUTPUT_FORMAT), self.category,
self.amount, self.name, self.code, self.unit_price, self.quantity)
def __iter__(self):
"""Allows an Record object to become a dictionary as:
dict(record)
"""
for attr in self.attributes:
yield attr, getattr(self, attr)
def values(self):
"""Exports values only (in string)."""
for k, v in self:
if k == 'created_at':
yield v.strftime(DATE_OUTPUT_FORMAT)
else:
yield str(v)
@property
def synthesized_created_at(self):
return synthesize_datetime(self.created_at, self.seq)
def synthesize_datetime(datetime, seq):
"""The original CSV file does not include time information (it only
includes date) and there is a high probability of having multiple records
on a single day. However, we have a unique constraint on (account_id,
asset_id, created_at, quantity) fields on the Record model. In order to
circumvent potential clashes, we are adding up some seconds (with the
sequence value) on the original timestamp.
"""
return datetime + timedelta(seconds=seq)
|
/sb-finance-0.4.1.tar.gz/sb-finance-0.4.1/finance/providers/miraeasset.py
| 0.482185 | 0.36923 |
miraeasset.py
|
pypi
|
from typing import Optional, Union, Sequence, Dict
from logging import getLogger
from threading import Lock
from statsd import StatsdClient
from opentelemetry.metrics import (
MeterProvider,
Meter,
NoOpMeter,
CallbackT,
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk.resources import Resource
from opentelemetry.util.types import Attributes
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
_logger = getLogger(__name__)
class StatsdMeterProvider(MeterProvider):
r"""Statsd MeterProvider.
Args:
statsd: StatsD client
resource: The resource representing what the metrics emitted from the SDK pertain to.
"""
def __init__(
self,
statsd: StatsdClient,
resource: Resource = Resource.create({}),
):
self._lock = Lock()
self._meter_lock = Lock()
self._statsd = statsd
self._resource = resource
self._meters = {}
def get_meter(
self,
name: str,
version: Optional[str] = None,
schema_url: Optional[str] = None,
) -> Meter:
"""Returns a StatsdMeter."""
if not name:
_logger.warning("Meter name cannot be None or empty.")
return NoOpMeter(name, version=version, schema_url=schema_url)
info = InstrumentationScope(name, version, schema_url)
with self._meter_lock:
if not self._meters.get(info):
self._meters[info] = StatsdMeter(
self._statsd,
self._resource,
info,
)
return self._meters[info]
class StatsdMeter(Meter):
"""
Statsd meter implementation.
"""
def __init__(
self,
statsd: StatsdClient,
resource: Resource,
instrumentation_scope: InstrumentationScope
):
super().__init__(instrumentation_scope.name, instrumentation_scope.version, instrumentation_scope.schema_url)
self._statsd = statsd
self._resource = resource
self._instrumentation_scope = instrumentation_scope
def create_counter(
self,
name: str,
unit: str = "",
description: str = "",
) -> Counter:
if self._is_instrument_registered(
name, StatsdCounter, unit, description
)[0]:
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
Counter.__name__,
unit,
description,
)
return StatsdCounter(self._statsd, self._resource,
name, unit=unit, description=description)
def create_up_down_counter(
self,
name: str,
unit: str = "",
description: str = "",
) -> UpDownCounter:
if self._is_instrument_registered(
name, StatsdUpDownCounter, unit, description
)[0]:
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
UpDownCounter.__name__,
unit,
description,
)
return StatsdUpDownCounter(self._statsd, self._resource,
name, unit=unit, description=description)
def create_observable_counter(
self,
name: str,
callbacks: Optional[Sequence[CallbackT]] = None,
unit: str = "",
description: str = "",
) -> ObservableCounter:
if self._is_instrument_registered(
name, StatsdObservableCounter, unit, description
)[0]:
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
ObservableCounter.__name__,
unit,
description,
)
return StatsdObservableCounter(
self._statsd,
self._resource,
name,
callbacks,
unit=unit,
description=description,
)
def create_histogram(
self,
name: str,
unit: str = "",
description: str = "",
) -> Histogram:
if self._is_instrument_registered(
name, StatsdHistogram, unit, description
)[0]:
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
Histogram.__name__,
unit,
description,
)
return StatsdHistogram(self._statsd, self._resource,
name, unit=unit, description=description)
def create_observable_gauge(
self,
name: str,
callbacks: Optional[Sequence[CallbackT]] = None,
unit: str = "",
description: str = "",
) -> ObservableGauge:
if self._is_instrument_registered(
name, StatsdObservableGauge, unit, description
)[0]:
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
ObservableGauge.__name__,
unit,
description,
)
return StatsdObservableGauge(
self._statsd, self._resource,
name,
callbacks,
unit=unit,
description=description,
)
def create_observable_up_down_counter(
self,
name: str,
callbacks: Optional[Sequence[CallbackT]] = None,
unit: str = "",
description: str = "",
) -> ObservableUpDownCounter:
if self._is_instrument_registered(
name, StatsdObservableUpDownCounter, unit, description
)[0]:
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
ObservableUpDownCounter.__name__,
unit,
description,
)
return StatsdObservableUpDownCounter(
self._statsd, self._resource,
name,
callbacks,
unit=unit,
description=description,
)
class StatsdCounter(Counter):
"""StatsD implementation of `Counter`."""
def __init__(
self,
statsd: StatsdClient,
resource: Resource,
name: str,
unit: str = "",
description: str = "",
) -> None:
super().__init__(name, unit=unit, description=description)
self._statsd = statsd
self._resource = resource
self._name = name
def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
) -> None:
if amount < 0:
_logger.warning(
"Add amount must be non-negative on Counter %s.", self._name
)
return
self._statsd.increment(self._name, value=amount, tags=resource_to_tags(self._resource, attributes))
class StatsdUpDownCounter(UpDownCounter):
"""StatsD implementation of `UpDownCounter`."""
def __init__(
self,
statsd: StatsdClient,
resource: Resource,
name: str,
unit: str = "",
description: str = "",
) -> None:
super().__init__(name, unit=unit, description=description)
self._statsd = statsd
self._resource = resource
self._name = name
def add(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
) -> None:
self._statsd.increment(self._name, value=amount, tags=resource_to_tags(self._resource, attributes))
class StatsdObservableCounter(ObservableCounter):
"""StatsD implementation of `ObservableCounter`."""
def __init__(
self,
statsd: StatsdClient,
resource: Resource,
name: str,
callbacks: Optional[Sequence[CallbackT]] = None,
unit: str = "",
description: str = "",
) -> None:
super().__init__(name, callbacks, unit=unit, description=description)
_logger.warning(
"Observable not supported for Statsd."
)
class StatsdObservableUpDownCounter(ObservableUpDownCounter):
"""No-op implementation of `ObservableUpDownCounter`."""
def __init__(
self,
statsd: StatsdClient,
resource: Resource,
name: str,
callbacks: Optional[Sequence[CallbackT]] = None,
unit: str = "",
description: str = "",
) -> None:
super().__init__(name, callbacks, unit=unit, description=description)
_logger.warning(
"Observable not supported for Statsd."
)
class StatsdHistogram(Histogram):
"""No-op implementation of `Histogram`."""
def __init__(
self,
statsd: StatsdClient,
resource: Resource,
name: str,
unit: str = "",
description: str = "",
) -> None:
super().__init__(name, unit=unit, description=description)
self._statsd = statsd
self._resource = resource
self._name = name
def record(
self,
amount: Union[int, float],
attributes: Optional[Attributes] = None,
) -> None:
self._statsd.timing(self._name, value=amount, tags=resource_to_tags(self._resource, attributes))
class StatsdObservableGauge(ObservableGauge):
"""No-op implementation of `ObservableGauge`."""
def __init__(
self,
statsd: StatsdClient,
resource: Resource,
name: str,
callbacks: Optional[Sequence[CallbackT]] = None,
unit: str = "",
description: str = "",
) -> None:
super().__init__(name, callbacks, unit=unit, description=description)
_logger.warning(
"Observable not supported for Statsd."
)
def resource_to_tags(resource: Resource, attributes: Optional[Attributes] = None) -> Optional[Dict[str, str]]:
tags = {}
for key, value in resource.attributes.items():
tags[str(key)] = str(value)
if attributes is not None:
for key, value in attributes.items():
tags[str(key)] = str(value)
if len(tags) == 0:
return None
return tags
|
/sb-opentelemetry-sdk-extension-statsd-0.8.2.tar.gz/sb-opentelemetry-sdk-extension-statsd-0.8.2/src/opentelemetry/sdk/extension/statsd/metrics/__init__.py
| 0.94252 | 0.217151 |
__init__.py
|
pypi
|
from typing import Any, Dict, List, Optional, Type
import torch as th
from gymnasium import spaces
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
NatureCNN,
create_mlp,
)
from stable_baselines3.common.type_aliases import Schedule
from torch import nn
class QuantileNetwork(BasePolicy):
"""
Quantile network for QR-DQN
:param observation_space: Observation space
:param action_space: Action space
:param n_quantiles: Number of quantiles
:param net_arch: The specification of the network architecture.
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
"""
action_space: spaces.Discrete
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Discrete,
features_extractor: BaseFeaturesExtractor,
features_dim: int,
n_quantiles: int = 200,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
if net_arch is None:
net_arch = [64, 64]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.features_dim = features_dim
self.n_quantiles = n_quantiles
action_dim = int(self.action_space.n) # number of actions
quantile_net = create_mlp(self.features_dim, action_dim * self.n_quantiles, self.net_arch, self.activation_fn)
self.quantile_net = nn.Sequential(*quantile_net)
def forward(self, obs: th.Tensor) -> th.Tensor:
"""
Predict the quantiles.
:param obs: Observation
:return: The estimated quantiles for each action.
"""
quantiles = self.quantile_net(self.extract_features(obs, self.features_extractor))
return quantiles.view(-1, self.n_quantiles, int(self.action_space.n))
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
q_values = self(observation).mean(dim=1)
# Greedy action
action = q_values.argmax(dim=1).reshape(-1)
return action
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
data.update(
dict(
net_arch=self.net_arch,
features_dim=self.features_dim,
n_quantiles=self.n_quantiles,
activation_fn=self.activation_fn,
features_extractor=self.features_extractor,
)
)
return data
class QRDQNPolicy(BasePolicy):
"""
Policy class with quantile and target networks for QR-DQN.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param n_quantiles: Number of quantiles
:param net_arch: The specification of the network architecture.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
quantile_net: QuantileNetwork
quantile_net_target: QuantileNetwork
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Discrete,
lr_schedule: Schedule,
n_quantiles: int = 200,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
normalize_images=normalize_images,
)
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [64, 64]
self.n_quantiles = n_quantiles
self.net_arch = net_arch
self.activation_fn = activation_fn
self.net_args = {
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_quantiles": self.n_quantiles,
"net_arch": self.net_arch,
"activation_fn": self.activation_fn,
"normalize_images": normalize_images,
}
self._build(lr_schedule)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the network and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self.quantile_net = self.make_quantile_net()
self.quantile_net_target = self.make_quantile_net()
self.quantile_net_target.load_state_dict(self.quantile_net.state_dict())
self.quantile_net_target.set_training_mode(False)
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class( # type: ignore[call-arg]
self.parameters(),
lr=lr_schedule(1),
**self.optimizer_kwargs,
)
def make_quantile_net(self) -> QuantileNetwork:
# Make sure we always have separate networks for features extractors etc
net_args = self._update_features_extractor(self.net_args, features_extractor=None)
return QuantileNetwork(**net_args).to(self.device)
def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self._predict(obs, deterministic=deterministic)
def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self.quantile_net._predict(obs, deterministic=deterministic)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
data.update(
dict(
n_quantiles=self.net_args["n_quantiles"],
net_arch=self.net_args["net_arch"],
activation_fn=self.net_args["activation_fn"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def set_training_mode(self, mode: bool) -> None:
"""
Put the policy in either training or evaluation mode.
This affects certain modules, such as batch normalisation and dropout.
:param mode: if true, set to training mode, else set to evaluation mode
"""
self.quantile_net.set_training_mode(mode)
self.training = mode
MlpPolicy = QRDQNPolicy
class CnnPolicy(QRDQNPolicy):
"""
Policy class for QR-DQN when using images as input.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param n_quantiles: Number of quantiles
:param net_arch: The specification of the network architecture.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Discrete,
lr_schedule: Schedule,
n_quantiles: int = 200,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
n_quantiles,
net_arch,
activation_fn,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MultiInputPolicy(QRDQNPolicy):
"""
Policy class for QR-DQN when using dict observations as input.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param n_quantiles: Number of quantiles
:param net_arch: The specification of the network architecture.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Discrete,
lr_schedule: Schedule,
n_quantiles: int = 200,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
n_quantiles,
net_arch,
activation_fn,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/qrdqn/policies.py
| 0.964313 | 0.565419 |
policies.py
|
pypi
|
import warnings
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, TypeVar, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, get_parameters_by_name, polyak_update
from sb3_contrib.common.utils import quantile_huber_loss
from sb3_contrib.qrdqn.policies import CnnPolicy, MlpPolicy, MultiInputPolicy, QRDQNPolicy, QuantileNetwork
SelfQRDQN = TypeVar("SelfQRDQN", bound="QRDQN")
class QRDQN(OffPolicyAlgorithm):
"""
Quantile Regression Deep Q-Network (QR-DQN)
Paper: https://arxiv.org/abs/1710.10044
Default hyperparameters are taken from the paper and are tuned for Atari games.
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout
(see ``train_freq`` and ``n_episodes_rollout``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param target_update_interval: update the target network every ``target_update_interval``
environment steps.
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
:param exploration_initial_eps: initial value of random action probability
:param exploration_final_eps: final value of random action probability
:param max_grad_norm: The maximum value for the gradient clipping (if None, no clipping)
:param stats_window_size: Window size for the rollout logging, specifying the number of episodes to average
the reported success rate, mean episode length, and mean reward over
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
policy_aliases: ClassVar[Dict[str, Type[BasePolicy]]] = {
"MlpPolicy": MlpPolicy,
"CnnPolicy": CnnPolicy,
"MultiInputPolicy": MultiInputPolicy,
}
# Linear schedule will be defined in `_setup_model()`
exploration_schedule: Schedule
quantile_net: QuantileNetwork
quantile_net_target: QuantileNetwork
policy: QRDQNPolicy
def __init__(
self,
policy: Union[str, Type[QRDQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 5e-5,
buffer_size: int = 1000000, # 1e6
learning_starts: int = 50000,
batch_size: int = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: int = 4,
gradient_steps: int = 1,
replay_buffer_class: Optional[Type[ReplayBuffer]] = None,
replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.005,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.01,
max_grad_norm: Optional[float] = None,
stats_window_size: int = 100,
tensorboard_log: Optional[str] = None,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super().__init__(
policy,
env,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
action_noise=None, # No action noise
replay_buffer_class=replay_buffer_class,
replay_buffer_kwargs=replay_buffer_kwargs,
policy_kwargs=policy_kwargs,
stats_window_size=stats_window_size,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
supported_action_spaces=(spaces.Discrete,),
support_multi_env=True,
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
# For updating the target network with multiple envs:
self._n_calls = 0
self.max_grad_norm = max_grad_norm
# "epsilon" for the epsilon-greedy exploration
self.exploration_rate = 0.0
if "optimizer_class" not in self.policy_kwargs:
self.policy_kwargs["optimizer_class"] = th.optim.Adam
# Proposed in the QR-DQN paper where `batch_size = 32`
self.policy_kwargs["optimizer_kwargs"] = dict(eps=0.01 / batch_size)
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super()._setup_model()
self._create_aliases()
# Copy running stats, see https://github.com/DLR-RM/stable-baselines3/issues/996
self.batch_norm_stats = get_parameters_by_name(self.quantile_net, ["running_"])
self.batch_norm_stats_target = get_parameters_by_name(self.quantile_net_target, ["running_"])
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
# Account for multiple environments
# each call to step() corresponds to n_envs transitions
if self.n_envs > 1:
if self.n_envs > self.target_update_interval:
warnings.warn(
"The number of environments used is greater than the target network "
f"update interval ({self.n_envs} > {self.target_update_interval}), "
"therefore the target network will be updated after each call to env.step() "
f"which corresponds to {self.n_envs} steps."
)
self.target_update_interval = max(self.target_update_interval // self.n_envs, 1)
def _create_aliases(self) -> None:
self.quantile_net = self.policy.quantile_net
self.quantile_net_target = self.policy.quantile_net_target
self.n_quantiles = self.policy.n_quantiles
def _on_step(self) -> None:
"""
Update the exploration rate and target network if needed.
This method is called in ``collect_rollouts()`` after each step in the environment.
"""
self._n_calls += 1
if self._n_calls % self.target_update_interval == 0:
polyak_update(self.quantile_net.parameters(), self.quantile_net_target.parameters(), self.tau)
# Copy running stats, see https://github.com/DLR-RM/stable-baselines3/issues/996
polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
self.logger.record("rollout/exploration_rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update learning rate according to schedule
self._update_learning_rate(self.policy.optimizer)
losses = []
for _ in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # type: ignore[union-attr]
with th.no_grad():
# Compute the quantiles of next observation
next_quantiles = self.quantile_net_target(replay_data.next_observations)
# Compute the greedy actions which maximize the next Q values
next_greedy_actions = next_quantiles.mean(dim=1, keepdim=True).argmax(dim=2, keepdim=True)
# Make "n_quantiles" copies of actions, and reshape to (batch_size, n_quantiles, 1)
next_greedy_actions = next_greedy_actions.expand(batch_size, self.n_quantiles, 1)
# Follow greedy policy: use the one with the highest Q values
next_quantiles = next_quantiles.gather(dim=2, index=next_greedy_actions).squeeze(dim=2)
# 1-step TD target
target_quantiles = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_quantiles
# Get current quantile estimates
current_quantiles = self.quantile_net(replay_data.observations)
# Make "n_quantiles" copies of actions, and reshape to (batch_size, n_quantiles, 1).
actions = replay_data.actions[..., None].long().expand(batch_size, self.n_quantiles, 1)
# Retrieve the quantiles for the actions from the replay buffer
current_quantiles = th.gather(current_quantiles, dim=2, index=actions).squeeze(dim=2)
# Compute Quantile Huber loss, summing over a quantile dimension as in the paper.
loss = quantile_huber_loss(current_quantiles, target_quantiles, sum_over_quantiles=True)
losses.append(loss.item())
# Optimize the policy
self.policy.optimizer.zero_grad()
loss.backward()
# Clip gradient norm
if self.max_grad_norm is not None:
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
# Increase update counter
self._n_updates += gradient_steps
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last hidden states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
this correspond to beginning of episodes,
where the hidden states of the RNN must be reset.
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next hidden state
(used in recurrent policies)
"""
if not deterministic and np.random.rand() < self.exploration_rate:
if self.policy.is_vectorized_observation(observation):
if isinstance(observation, dict):
n_batch = observation[next(iter(observation.keys()))].shape[0]
else:
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
else:
action, state = self.policy.predict(observation, state, episode_start, deterministic)
return action, state
def learn(
self: SelfQRDQN,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
tb_log_name: str = "QRDQN",
reset_num_timesteps: bool = True,
progress_bar: bool = False,
) -> SelfQRDQN:
return super().learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
tb_log_name=tb_log_name,
reset_num_timesteps=reset_num_timesteps,
progress_bar=progress_bar,
)
def _excluded_save_params(self) -> List[str]:
return super()._excluded_save_params() + ["quantile_net", "quantile_net_target"] # noqa: RUF005
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/qrdqn/qrdqn.py
| 0.950457 | 0.547646 |
qrdqn.py
|
pypi
|
from typing import Any, Dict, List, Optional, Type
import torch as th
from gymnasium import spaces
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.preprocessing import get_action_dim
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, create_mlp
from torch import nn
class ARSPolicy(BasePolicy):
"""
Policy network for ARS.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param net_arch: Network architecture, defaults to a 2 layers MLP with 64 hidden nodes.
:param activation_fn: Activation function
:param with_bias: If set to False, the layers will not learn an additive bias
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function. If not squashed with tanh the output will instead be clipped.
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Space,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
with_bias: bool = True,
squash_output: bool = True,
):
super().__init__(
observation_space,
action_space,
squash_output=isinstance(action_space, spaces.Box) and squash_output,
)
if net_arch is None:
net_arch = [64, 64]
self.net_arch = net_arch
self.features_extractor = self.make_features_extractor()
self.features_dim = self.features_extractor.features_dim
self.activation_fn = activation_fn
if isinstance(action_space, spaces.Box):
action_dim = get_action_dim(action_space)
actor_net = create_mlp(
self.features_dim, action_dim, net_arch, activation_fn, with_bias=with_bias, squash_output=squash_output
)
elif isinstance(action_space, spaces.Discrete):
actor_net = create_mlp(self.features_dim, int(action_space.n), net_arch, activation_fn, with_bias=with_bias)
else:
raise NotImplementedError(f"Error: ARS policy not implemented for action space of type {type(action_space)}.")
self.action_net = nn.Sequential(*actor_net)
def _get_constructor_parameters(self) -> Dict[str, Any]:
# data = super()._get_constructor_parameters() this adds normalize_images, which we don't support...
data = dict(
observation_space=self.observation_space,
action_space=self.action_space,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
)
return data
def forward(self, obs: th.Tensor) -> th.Tensor:
# Make mypy happy:
assert isinstance(self.features_extractor, BaseFeaturesExtractor)
features = self.extract_features(obs, self.features_extractor)
if isinstance(self.action_space, spaces.Box):
return self.action_net(features)
elif isinstance(self.action_space, spaces.Discrete):
logits = self.action_net(features)
return th.argmax(logits, dim=1)
else:
raise NotImplementedError()
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
# Non deterministic action does not really make sense for ARS, we ignore this parameter for now..
return self(observation)
class ARSLinearPolicy(ARSPolicy):
"""
Linear policy network for ARS.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param with_bias: With or without bias on the output
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function. If not squashed with tanh the output will instead be clipped.
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Space,
with_bias: bool = False,
squash_output: bool = False,
):
super().__init__(observation_space, action_space, net_arch=[], with_bias=with_bias, squash_output=squash_output)
MlpPolicy = ARSPolicy
LinearPolicy = ARSLinearPolicy
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/ars/policies.py
| 0.968351 | 0.649078 |
policies.py
|
pypi
|
import sys
import time
from collections import deque
from typing import Any, ClassVar, Dict, Optional, Tuple, Type, TypeVar, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common import utils
from stable_baselines3.common.buffers import RolloutBuffer
from stable_baselines3.common.callbacks import BaseCallback, CallbackList, ConvertCallback, ProgressBarCallback
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn, obs_as_tensor, safe_mean
from stable_baselines3.common.vec_env import VecEnv
from torch.nn import functional as F
from sb3_contrib.common.maskable.buffers import MaskableDictRolloutBuffer, MaskableRolloutBuffer
from sb3_contrib.common.maskable.policies import MaskableActorCriticPolicy
from sb3_contrib.common.maskable.utils import get_action_masks, is_masking_supported
from sb3_contrib.ppo_mask.policies import CnnPolicy, MlpPolicy, MultiInputPolicy
SelfMaskablePPO = TypeVar("SelfMaskablePPO", bound="MaskablePPO")
class MaskablePPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version) with Invalid Action Masking.
Based on the original Stable Baselines 3 implementation.
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
Background on Invalid Action Masking: https://arxiv.org/abs/2006.14171
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param batch_size: Minibatch size
:param n_epochs: Number of epoch when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param normalize_advantage: Whether to normalize or not the advantage
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param stats_window_size: Window size for the rollout logging, specifying the number of episodes to average
the reported success rate, mean episode length, and mean reward over
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
policy_aliases: ClassVar[Dict[str, Type[BasePolicy]]] = {
"MlpPolicy": MlpPolicy,
"CnnPolicy": CnnPolicy,
"MultiInputPolicy": MultiInputPolicy,
}
def __init__(
self,
policy: Union[str, Type[MaskableActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
normalize_advantage: bool = True,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
target_kl: Optional[float] = None,
stats_window_size: int = 100,
tensorboard_log: Optional[str] = None,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super().__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=False,
sde_sample_freq=-1,
stats_window_size=stats_window_size,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
seed=seed,
device=device,
_init_setup_model=False,
supported_action_spaces=(
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.normalize_advantage = normalize_advantage
self.target_kl = target_kl
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
self._setup_lr_schedule()
self.set_random_seed(self.seed)
buffer_cls = MaskableDictRolloutBuffer if isinstance(self.observation_space, spaces.Dict) else MaskableRolloutBuffer
self.policy = self.policy_class(
self.observation_space,
self.action_space,
self.lr_schedule,
**self.policy_kwargs, # pytype:disable=not-instantiable
)
self.policy = self.policy.to(self.device)
if not isinstance(self.policy, MaskableActorCriticPolicy):
raise ValueError("Policy must subclass MaskableActorCriticPolicy")
self.rollout_buffer = buffer_cls(
self.n_steps,
self.observation_space,
self.action_space,
self.device,
gamma=self.gamma,
gae_lambda=self.gae_lambda,
n_envs=self.n_envs,
)
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def _init_callback(
self,
callback: MaybeCallback,
use_masking: bool = True,
progress_bar: bool = False,
) -> BaseCallback:
"""
:param callback: Callback(s) called at every step with state of the algorithm.
:param use_masking: Whether or not to use invalid action masks during evaluation
:param progress_bar: Display a progress bar using tqdm and rich.
:return: A hybrid callback calling `callback` and performing evaluation.
"""
# Convert a list of callbacks into a callback
if isinstance(callback, list):
callback = CallbackList(callback)
# Convert functional callback to object
if not isinstance(callback, BaseCallback):
callback = ConvertCallback(callback)
# Add progress bar callback
if progress_bar:
callback = CallbackList([callback, ProgressBarCallback()])
callback.init_callback(self)
return callback
def _setup_learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
reset_num_timesteps: bool = True,
tb_log_name: str = "run",
use_masking: bool = True,
progress_bar: bool = False,
) -> Tuple[int, BaseCallback]:
"""
Initialize different variables needed for training.
:param total_timesteps: The total number of samples (env steps) to train on
:param callback: Callback(s) called at every step with state of the algorithm.
:param reset_num_timesteps: Whether to reset or not the ``num_timesteps`` attribute
:param tb_log_name: the name of the run for tensorboard log
:param use_masking: Whether or not to use invalid action masks during training
:param progress_bar: Display a progress bar using tqdm and rich.
:return:
"""
self.start_time = time.time_ns()
if self.ep_info_buffer is None or reset_num_timesteps:
# Initialize buffers if they don't exist, or reinitialize if resetting counters
self.ep_info_buffer = deque(maxlen=self._stats_window_size)
self.ep_success_buffer = deque(maxlen=self._stats_window_size)
if reset_num_timesteps:
self.num_timesteps = 0
self._episode_num = 0
else:
# Make sure training timesteps are ahead of the internal counter
total_timesteps += self.num_timesteps
self._total_timesteps = total_timesteps
self._num_timesteps_at_start = self.num_timesteps
# Avoid resetting the environment when calling ``.learn()`` consecutive times
if reset_num_timesteps or self._last_obs is None:
self._last_obs = self.env.reset()
self._last_episode_starts = np.ones((self.env.num_envs,), dtype=bool)
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
self._last_original_obs = self._vec_normalize_env.get_original_obs()
# Configure logger's outputs if no logger was passed
if not self._custom_logger:
self._logger = utils.configure_logger(self.verbose, self.tensorboard_log, tb_log_name, reset_num_timesteps)
# Create eval callback if needed
callback = self._init_callback(callback, use_masking, progress_bar)
return total_timesteps, callback
def collect_rollouts(
self,
env: VecEnv,
callback: BaseCallback,
rollout_buffer: RolloutBuffer,
n_rollout_steps: int,
use_masking: bool = True,
) -> bool:
"""
Collect experiences using the current policy and fill a ``RolloutBuffer``.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
This method is largely identical to the implementation found in the parent class.
:param env: The training environment
:param callback: Callback that will be called at each step
(and at the beginning and end of the rollout)
:param rollout_buffer: Buffer to fill with rollouts
:param n_steps: Number of experiences to collect per environment
:param use_masking: Whether or not to use invalid action masks during training
:return: True if function returned with at least `n_rollout_steps`
collected, False if callback terminated rollout prematurely.
"""
assert isinstance(
rollout_buffer, (MaskableRolloutBuffer, MaskableDictRolloutBuffer)
), "RolloutBuffer doesn't support action masking"
assert self._last_obs is not None, "No previous observation was provided"
# Switch to eval mode (this affects batch norm / dropout)
self.policy.set_training_mode(False)
n_steps = 0
action_masks = None
rollout_buffer.reset()
if use_masking and not is_masking_supported(env):
raise ValueError("Environment does not support action masking. Consider using ActionMasker wrapper")
callback.on_rollout_start()
while n_steps < n_rollout_steps:
with th.no_grad():
# Convert to pytorch tensor or to TensorDict
obs_tensor = obs_as_tensor(self._last_obs, self.device)
# This is the only change related to invalid action masking
if use_masking:
action_masks = get_action_masks(env)
actions, values, log_probs = self.policy(obs_tensor, action_masks=action_masks)
actions = actions.cpu().numpy()
new_obs, rewards, dones, infos = env.step(actions)
self.num_timesteps += env.num_envs
# Give access to local variables
callback.update_locals(locals())
if callback.on_step() is False:
return False
self._update_info_buffer(infos)
n_steps += 1
if isinstance(self.action_space, spaces.Discrete):
# Reshape in case of discrete action
actions = actions.reshape(-1, 1)
# Handle timeout by bootstraping with value function
# see GitHub issue #633
for idx, done in enumerate(dones):
if (
done
and infos[idx].get("terminal_observation") is not None
and infos[idx].get("TimeLimit.truncated", False)
):
terminal_obs = self.policy.obs_to_tensor(infos[idx]["terminal_observation"])[0]
with th.no_grad():
terminal_value = self.policy.predict_values(terminal_obs)[0]
rewards[idx] += self.gamma * terminal_value
rollout_buffer.add(
self._last_obs,
actions,
rewards,
self._last_episode_starts,
values,
log_probs,
action_masks=action_masks,
)
self._last_obs = new_obs
self._last_episode_starts = dones
with th.no_grad():
# Compute value for the last timestep
# Masking is not needed here, the choice of action doesn't matter.
# We only want the value of the current observation.
values = self.policy.predict_values(obs_as_tensor(new_obs, self.device))
rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)
callback.on_rollout_end()
return True
def predict(
self,
observation: np.ndarray,
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
action_masks: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last hidden states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
this correspond to beginning of episodes,
where the hidden states of the RNN must be reset.
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next hidden state
(used in recurrent policies)
"""
return self.policy.predict(observation, state, episode_start, deterministic, action_masks=action_masks)
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
continue_training = True
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
values, log_prob, entropy = self.policy.evaluate_actions(
rollout_data.observations,
actions,
action_masks=rollout_data.action_masks,
)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the different between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
def learn(
self: SelfMaskablePPO,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
tb_log_name: str = "PPO",
reset_num_timesteps: bool = True,
use_masking: bool = True,
progress_bar: bool = False,
) -> SelfMaskablePPO:
iteration = 0
total_timesteps, callback = self._setup_learn(
total_timesteps,
callback,
reset_num_timesteps,
tb_log_name,
use_masking,
progress_bar,
)
callback.on_training_start(locals(), globals())
while self.num_timesteps < total_timesteps:
continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, self.n_steps, use_masking)
if continue_training is False:
break
iteration += 1
self._update_current_progress_remaining(self.num_timesteps, total_timesteps)
# Display training infos
if log_interval is not None and iteration % log_interval == 0:
time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)
fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)
self.logger.record("time/iterations", iteration, exclude="tensorboard")
if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
self.logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
self.logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]))
self.logger.record("time/fps", fps)
self.logger.record("time/time_elapsed", int(time_elapsed), exclude="tensorboard")
self.logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard")
self.logger.dump(step=self.num_timesteps)
self.train()
callback.on_training_end()
return self
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/ppo_mask/ppo_mask.py
| 0.838415 | 0.409988 |
ppo_mask.py
|
pypi
|
import sys
import time
from copy import deepcopy
from typing import Any, ClassVar, Dict, Optional, Type, TypeVar, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.buffers import RolloutBuffer
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn, obs_as_tensor, safe_mean
from stable_baselines3.common.vec_env import VecEnv
from sb3_contrib.common.recurrent.buffers import RecurrentDictRolloutBuffer, RecurrentRolloutBuffer
from sb3_contrib.common.recurrent.policies import RecurrentActorCriticPolicy
from sb3_contrib.common.recurrent.type_aliases import RNNStates
from sb3_contrib.ppo_recurrent.policies import CnnLstmPolicy, MlpLstmPolicy, MultiInputLstmPolicy
SelfRecurrentPPO = TypeVar("SelfRecurrentPPO", bound="RecurrentPPO")
class RecurrentPPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
with support for recurrent policies (LSTM).
Based on the original Stable Baselines 3 implementation.
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param batch_size: Minibatch size
:param n_epochs: Number of epoch when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param normalize_advantage: Whether to normalize or not the advantage
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param stats_window_size: Window size for the rollout logging, specifying the number of episodes to average
the reported success rate, mean episode length, and mean reward over
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
policy_aliases: ClassVar[Dict[str, Type[BasePolicy]]] = {
"MlpLstmPolicy": MlpLstmPolicy,
"CnnLstmPolicy": CnnLstmPolicy,
"MultiInputLstmPolicy": MultiInputLstmPolicy,
}
def __init__(
self,
policy: Union[str, Type[RecurrentActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 128,
batch_size: Optional[int] = 128,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
normalize_advantage: bool = True,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
stats_window_size: int = 100,
tensorboard_log: Optional[str] = None,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super().__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
stats_window_size=stats_window_size,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
seed=seed,
device=device,
_init_setup_model=False,
supported_action_spaces=(
spaces.Box,
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.normalize_advantage = normalize_advantage
self.target_kl = target_kl
self._last_lstm_states = None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
self._setup_lr_schedule()
self.set_random_seed(self.seed)
buffer_cls = RecurrentDictRolloutBuffer if isinstance(self.observation_space, spaces.Dict) else RecurrentRolloutBuffer
self.policy = self.policy_class(
self.observation_space,
self.action_space,
self.lr_schedule,
use_sde=self.use_sde,
**self.policy_kwargs, # pytype:disable=not-instantiable
)
self.policy = self.policy.to(self.device)
# We assume that LSTM for the actor and the critic
# have the same architecture
lstm = self.policy.lstm_actor
if not isinstance(self.policy, RecurrentActorCriticPolicy):
raise ValueError("Policy must subclass RecurrentActorCriticPolicy")
single_hidden_state_shape = (lstm.num_layers, self.n_envs, lstm.hidden_size)
# hidden and cell states for actor and critic
self._last_lstm_states = RNNStates(
(
th.zeros(single_hidden_state_shape, device=self.device),
th.zeros(single_hidden_state_shape, device=self.device),
),
(
th.zeros(single_hidden_state_shape, device=self.device),
th.zeros(single_hidden_state_shape, device=self.device),
),
)
hidden_state_buffer_shape = (self.n_steps, lstm.num_layers, self.n_envs, lstm.hidden_size)
self.rollout_buffer = buffer_cls(
self.n_steps,
self.observation_space,
self.action_space,
hidden_state_buffer_shape,
self.device,
gamma=self.gamma,
gae_lambda=self.gae_lambda,
n_envs=self.n_envs,
)
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def collect_rollouts(
self,
env: VecEnv,
callback: BaseCallback,
rollout_buffer: RolloutBuffer,
n_rollout_steps: int,
) -> bool:
"""
Collect experiences using the current policy and fill a ``RolloutBuffer``.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
:param env: The training environment
:param callback: Callback that will be called at each step
(and at the beginning and end of the rollout)
:param rollout_buffer: Buffer to fill with rollouts
:param n_steps: Number of experiences to collect per environment
:return: True if function returned with at least `n_rollout_steps`
collected, False if callback terminated rollout prematurely.
"""
assert isinstance(
rollout_buffer, (RecurrentRolloutBuffer, RecurrentDictRolloutBuffer)
), f"{rollout_buffer} doesn't support recurrent policy"
assert self._last_obs is not None, "No previous observation was provided"
# Switch to eval mode (this affects batch norm / dropout)
self.policy.set_training_mode(False)
n_steps = 0
rollout_buffer.reset()
# Sample new weights for the state dependent exploration
if self.use_sde:
self.policy.reset_noise(env.num_envs)
callback.on_rollout_start()
lstm_states = deepcopy(self._last_lstm_states)
while n_steps < n_rollout_steps:
if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
# Sample a new noise matrix
self.policy.reset_noise(env.num_envs)
with th.no_grad():
# Convert to pytorch tensor or to TensorDict
obs_tensor = obs_as_tensor(self._last_obs, self.device)
episode_starts = th.tensor(self._last_episode_starts, dtype=th.float32, device=self.device)
actions, values, log_probs, lstm_states = self.policy.forward(obs_tensor, lstm_states, episode_starts)
actions = actions.cpu().numpy()
# Rescale and perform action
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.action_space, spaces.Box):
clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
new_obs, rewards, dones, infos = env.step(clipped_actions)
self.num_timesteps += env.num_envs
# Give access to local variables
callback.update_locals(locals())
if callback.on_step() is False:
return False
self._update_info_buffer(infos)
n_steps += 1
if isinstance(self.action_space, spaces.Discrete):
# Reshape in case of discrete action
actions = actions.reshape(-1, 1)
# Handle timeout by bootstraping with value function
# see GitHub issue #633
for idx, done_ in enumerate(dones):
if (
done_
and infos[idx].get("terminal_observation") is not None
and infos[idx].get("TimeLimit.truncated", False)
):
terminal_obs = self.policy.obs_to_tensor(infos[idx]["terminal_observation"])[0]
with th.no_grad():
terminal_lstm_state = (
lstm_states.vf[0][:, idx : idx + 1, :].contiguous(),
lstm_states.vf[1][:, idx : idx + 1, :].contiguous(),
)
# terminal_lstm_state = None
episode_starts = th.tensor([False], dtype=th.float32, device=self.device)
terminal_value = self.policy.predict_values(terminal_obs, terminal_lstm_state, episode_starts)[0]
rewards[idx] += self.gamma * terminal_value
rollout_buffer.add(
self._last_obs,
actions,
rewards,
self._last_episode_starts,
values,
log_probs,
lstm_states=self._last_lstm_states,
)
self._last_obs = new_obs
self._last_episode_starts = dones
self._last_lstm_states = lstm_states
with th.no_grad():
# Compute value for the last timestep
episode_starts = th.tensor(dones, dtype=th.float32, device=self.device)
values = self.policy.predict_values(obs_as_tensor(new_obs, self.device), lstm_states.vf, episode_starts)
rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)
callback.on_rollout_end()
return True
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
continue_training = True
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Convert mask from float to bool
mask = rollout_data.mask > 1e-8
# Re-sample the noise matrix because the log_std has changed
if self.use_sde:
self.policy.reset_noise(self.batch_size)
values, log_prob, entropy = self.policy.evaluate_actions(
rollout_data.observations,
actions,
rollout_data.lstm_states,
rollout_data.episode_starts,
)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
if self.normalize_advantage:
advantages = (advantages - advantages[mask].mean()) / (advantages[mask].std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.mean(th.min(policy_loss_1, policy_loss_2)[mask])
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()[mask]).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the different between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
# Mask padded sequences
value_loss = th.mean(((rollout_data.returns - values_pred) ** 2)[mask])
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob[mask])
else:
entropy_loss = -th.mean(entropy[mask])
entropy_losses.append(entropy_loss.item())
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean(((th.exp(log_ratio) - 1) - log_ratio)[mask]).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
def learn(
self: SelfRecurrentPPO,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
tb_log_name: str = "RecurrentPPO",
reset_num_timesteps: bool = True,
progress_bar: bool = False,
) -> SelfRecurrentPPO:
iteration = 0
total_timesteps, callback = self._setup_learn(
total_timesteps,
callback,
reset_num_timesteps,
tb_log_name,
progress_bar,
)
callback.on_training_start(locals(), globals())
while self.num_timesteps < total_timesteps:
continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)
if continue_training is False:
break
iteration += 1
self._update_current_progress_remaining(self.num_timesteps, total_timesteps)
# Display training infos
if log_interval is not None and iteration % log_interval == 0:
time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)
fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)
self.logger.record("time/iterations", iteration, exclude="tensorboard")
if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
self.logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
self.logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]))
self.logger.record("time/fps", fps)
self.logger.record("time/time_elapsed", int(time_elapsed), exclude="tensorboard")
self.logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard")
self.logger.dump(step=self.num_timesteps)
self.train()
callback.on_training_end()
return self
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/ppo_recurrent/ppo_recurrent.py
| 0.807195 | 0.389605 |
ppo_recurrent.py
|
pypi
|
from typing import Callable, Optional, Sequence
import torch as th
from torch import nn
def quantile_huber_loss(
current_quantiles: th.Tensor,
target_quantiles: th.Tensor,
cum_prob: Optional[th.Tensor] = None,
sum_over_quantiles: bool = True,
) -> th.Tensor:
"""
The quantile-regression loss, as described in the QR-DQN and TQC papers.
Partially taken from https://github.com/bayesgroup/tqc_pytorch.
:param current_quantiles: current estimate of quantiles, must be either
(batch_size, n_quantiles) or (batch_size, n_critics, n_quantiles)
:param target_quantiles: target of quantiles, must be either (batch_size, n_target_quantiles),
(batch_size, 1, n_target_quantiles), or (batch_size, n_critics, n_target_quantiles)
:param cum_prob: cumulative probabilities to calculate quantiles (also called midpoints in QR-DQN paper),
must be either (batch_size, n_quantiles), (batch_size, 1, n_quantiles), or (batch_size, n_critics, n_quantiles).
(if None, calculating unit quantiles)
:param sum_over_quantiles: if summing over the quantile dimension or not
:return: the loss
"""
if current_quantiles.ndim != target_quantiles.ndim:
raise ValueError(
f"Error: The dimension of curremt_quantile ({current_quantiles.ndim}) needs to match "
f"the dimension of target_quantiles ({target_quantiles.ndim})."
)
if current_quantiles.shape[0] != target_quantiles.shape[0]:
raise ValueError(
f"Error: The batch size of curremt_quantile ({current_quantiles.shape[0]}) needs to match "
f"the batch size of target_quantiles ({target_quantiles.shape[0]})."
)
if current_quantiles.ndim not in (2, 3):
raise ValueError(f"Error: The dimension of current_quantiles ({current_quantiles.ndim}) needs to be either 2 or 3.")
if cum_prob is None:
n_quantiles = current_quantiles.shape[-1]
# Cumulative probabilities to calculate quantiles.
cum_prob = (th.arange(n_quantiles, device=current_quantiles.device, dtype=th.float) + 0.5) / n_quantiles
if current_quantiles.ndim == 2:
# For QR-DQN, current_quantiles have a shape (batch_size, n_quantiles), and make cum_prob
# broadcastable to (batch_size, n_quantiles, n_target_quantiles)
cum_prob = cum_prob.view(1, -1, 1)
elif current_quantiles.ndim == 3:
# For TQC, current_quantiles have a shape (batch_size, n_critics, n_quantiles), and make cum_prob
# broadcastable to (batch_size, n_critics, n_quantiles, n_target_quantiles)
cum_prob = cum_prob.view(1, 1, -1, 1)
# QR-DQN
# target_quantiles: (batch_size, n_target_quantiles) -> (batch_size, 1, n_target_quantiles)
# current_quantiles: (batch_size, n_quantiles) -> (batch_size, n_quantiles, 1)
# pairwise_delta: (batch_size, n_target_quantiles, n_quantiles)
# TQC
# target_quantiles: (batch_size, 1, n_target_quantiles) -> (batch_size, 1, 1, n_target_quantiles)
# current_quantiles: (batch_size, n_critics, n_quantiles) -> (batch_size, n_critics, n_quantiles, 1)
# pairwise_delta: (batch_size, n_critics, n_quantiles, n_target_quantiles)
# Note: in both cases, the loss has the same shape as pairwise_delta
pairwise_delta = target_quantiles.unsqueeze(-2) - current_quantiles.unsqueeze(-1)
abs_pairwise_delta = th.abs(pairwise_delta)
huber_loss = th.where(abs_pairwise_delta > 1, abs_pairwise_delta - 0.5, pairwise_delta**2 * 0.5)
loss = th.abs(cum_prob - (pairwise_delta.detach() < 0).float()) * huber_loss
if sum_over_quantiles:
loss = loss.sum(dim=-2).mean()
else:
loss = loss.mean()
return loss
def conjugate_gradient_solver(
matrix_vector_dot_fn: Callable[[th.Tensor], th.Tensor],
b,
max_iter=10,
residual_tol=1e-10,
) -> th.Tensor:
"""
Finds an approximate solution to a set of linear equations Ax = b
Sources:
- https://github.com/ajlangley/trpo-pytorch/blob/master/conjugate_gradient.py
- https://github.com/joschu/modular_rl/blob/master/modular_rl/trpo.py#L122
Reference:
- https://epubs.siam.org/doi/abs/10.1137/1.9781611971446.ch6
:param matrix_vector_dot_fn:
a function that right multiplies a matrix A by a vector v
:param b:
the right hand term in the set of linear equations Ax = b
:param max_iter:
the maximum number of iterations (default is 10)
:param residual_tol:
residual tolerance for early stopping of the solving (default is 1e-10)
:return x:
the approximate solution to the system of equations defined by `matrix_vector_dot_fn`
and b
"""
# The vector is not initialized at 0 because of the instability issues when the gradient becomes small.
# A small random gaussian noise is used for the initialization.
x = 1e-4 * th.randn_like(b)
residual = b - matrix_vector_dot_fn(x)
# Equivalent to th.linalg.norm(residual) ** 2 (L2 norm squared)
residual_squared_norm = th.matmul(residual, residual)
if residual_squared_norm < residual_tol:
# If the gradient becomes extremely small
# The denominator in alpha will become zero
# Leading to a division by zero
return x
p = residual.clone()
for i in range(max_iter):
# A @ p (matrix vector multiplication)
A_dot_p = matrix_vector_dot_fn(p)
alpha = residual_squared_norm / p.dot(A_dot_p)
x += alpha * p
if i == max_iter - 1:
return x
residual -= alpha * A_dot_p
new_residual_squared_norm = th.matmul(residual, residual)
if new_residual_squared_norm < residual_tol:
return x
beta = new_residual_squared_norm / residual_squared_norm
residual_squared_norm = new_residual_squared_norm
p = residual + beta * p
# Note: this return statement is only used when max_iter=0
return x
def flat_grad(
output,
parameters: Sequence[nn.parameter.Parameter],
create_graph: bool = False,
retain_graph: bool = False,
) -> th.Tensor:
"""
Returns the gradients of the passed sequence of parameters into a flat gradient.
Order of parameters is preserved.
:param output: functional output to compute the gradient for
:param parameters: sequence of ``Parameter``
:param retain_graph: If ``False``, the graph used to compute the grad will be freed.
Defaults to the value of ``create_graph``.
:param create_graph: If ``True``, graph of the derivative will be constructed,
allowing to compute higher order derivative products. Default: ``False``.
:return: Tensor containing the flattened gradients
"""
grads = th.autograd.grad(
output,
parameters,
create_graph=create_graph,
retain_graph=retain_graph,
allow_unused=True,
)
return th.cat([th.ravel(grad) for grad in grads if grad is not None])
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/utils.py
| 0.972323 | 0.819965 |
utils.py
|
pypi
|
import multiprocessing as mp
from collections import defaultdict
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import torch as th
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.running_mean_std import RunningMeanStd
from stable_baselines3.common.vec_env import VecEnv, unwrap_vec_normalize
from stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper
def _worker(
remote: mp.connection.Connection,
parent_remote: mp.connection.Connection,
worker_env_wrapper: CloudpickleWrapper,
train_policy_wrapper: CloudpickleWrapper,
n_eval_episodes: int = 1,
) -> None:
"""
Function that will be run in each process.
It is in charge of creating environments, evaluating candidates
and communicating with the main process.
:param remote: Pipe to communicate with the parent process.
:param parent_remote:
:param worker_env_wrapper: Callable used to create the environment inside the process.
:param train_policy_wrapper: Callable used to create the policy inside the process.
:param n_eval_episodes: Number of evaluation episodes per candidate.
"""
parent_remote.close()
vec_env: VecEnv = worker_env_wrapper.var()
train_policy = train_policy_wrapper.var
vec_normalize = unwrap_vec_normalize(vec_env)
if vec_normalize is not None:
obs_rms = vec_normalize.obs_rms
else:
obs_rms = None
while True:
try:
cmd, data = remote.recv()
if cmd == "eval":
results = []
# Evaluate each candidate and save results
for weights_idx, candidate_weights in data:
train_policy.load_from_vector(candidate_weights.cpu())
episode_rewards, episode_lengths = evaluate_policy(
train_policy,
vec_env,
n_eval_episodes=n_eval_episodes,
return_episode_rewards=True,
warn=False,
)
results.append((weights_idx, (episode_rewards, episode_lengths)))
remote.send(results)
elif cmd == "seed":
# Note: the seed will only be effective at the next reset
remote.send(vec_env.seed(seed=data))
elif cmd == "get_obs_rms":
remote.send(obs_rms)
elif cmd == "sync_obs_rms":
vec_normalize.obs_rms = data
obs_rms = data
elif cmd == "close":
vec_env.close()
remote.close()
break
else:
raise NotImplementedError(f"`{cmd}` is not implemented in the worker")
except EOFError:
break
class AsyncEval:
"""
Helper class to do asynchronous evaluation of different policies with multiple processes.
It is useful when implementing population based methods like Evolution Strategies (ES),
Cross Entropy Method (CEM) or Augmented Random Search (ARS).
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important to avoid race conditions.
However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param envs_fn: Vectorized environments to run in subprocesses (callable)
:param train_policy: The policy object that will load the different candidate
weights.
:param start_method: method used to start the subprocesses.
Must be one of the methods returned by ``multiprocessing.get_all_start_methods()``.
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
:param n_eval_episodes: The number of episodes to test each agent
"""
def __init__(
self,
envs_fn: List[Callable[[], VecEnv]],
train_policy: BasePolicy,
start_method: Optional[str] = None,
n_eval_episodes: int = 1,
):
self.waiting = False
self.closed = False
n_envs = len(envs_fn)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = "forkserver" in mp.get_all_start_methods()
start_method = "forkserver" if forkserver_available else "spawn"
ctx = mp.get_context(start_method)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, worker_env in zip(self.work_remotes, self.remotes, envs_fn):
args = (
work_remote,
remote,
CloudpickleWrapper(worker_env),
CloudpickleWrapper(train_policy),
n_eval_episodes,
)
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error
process.start()
self.processes.append(process)
work_remote.close()
def send_jobs(self, candidate_weights: th.Tensor, pop_size: int) -> None:
"""
Send jobs to the workers to evaluate new candidates.
:param candidate_weights: The weights to be evaluated.
:pop_size: The number of candidate (size of the population)
"""
jobs_per_worker = defaultdict(list)
for weights_idx in range(pop_size):
jobs_per_worker[weights_idx % len(self.remotes)].append((weights_idx, candidate_weights[weights_idx]))
for remote_idx, remote in enumerate(self.remotes):
remote.send(("eval", jobs_per_worker[remote_idx]))
self.waiting = True
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
"""
Seed the environments.
:param seed: The seed for the pseudo-random generators.
:return:
"""
for idx, remote in enumerate(self.remotes):
remote.send(("seed", seed + idx))
return [remote.recv() for remote in self.remotes]
def get_results(self) -> List[Tuple[int, Tuple[np.ndarray, np.ndarray]]]:
"""
Retreive episode rewards and lengths from each worker
for all candidates (there might be multiple candidates per worker)
:return: A list of tuples containing each candidate index and its
result (episodic reward and episode length)
"""
results = [remote.recv() for remote in self.remotes]
flat_results = [result for worker_results in results for result in worker_results]
self.waiting = False
return flat_results
def get_obs_rms(self) -> List[RunningMeanStd]:
"""
Retrieve the observation filters (observation running mean std)
of each process, they will be combined in the main process.
Synchronisation is done afterward using ``sync_obs_rms()``.
:return: A list of ``RunningMeanStd`` objects (one per process)
"""
for remote in self.remotes:
remote.send(("get_obs_rms", None))
return [remote.recv() for remote in self.remotes]
def sync_obs_rms(self, obs_rms: RunningMeanStd) -> None:
"""
Synchronise (and update) the observation filters
(observation running mean std)
:param obs_rms: The updated ``RunningMeanStd`` to be used
by workers for normalizing observations.
"""
for remote in self.remotes:
remote.send(("sync_obs_rms", obs_rms))
def close(self) -> None:
"""
Close the processes.
"""
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for process in self.processes:
process.join()
self.closed = True
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/vec_env/async_eval.py
| 0.90425 | 0.355859 |
async_eval.py
|
pypi
|
from typing import Generator, NamedTuple, Optional, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.buffers import DictRolloutBuffer, RolloutBuffer
from stable_baselines3.common.type_aliases import TensorDict
from stable_baselines3.common.vec_env import VecNormalize
class MaskableRolloutBufferSamples(NamedTuple):
observations: th.Tensor
actions: th.Tensor
old_values: th.Tensor
old_log_prob: th.Tensor
advantages: th.Tensor
returns: th.Tensor
action_masks: th.Tensor
class MaskableDictRolloutBufferSamples(MaskableRolloutBufferSamples):
observations: TensorDict
actions: th.Tensor
old_values: th.Tensor
old_log_prob: th.Tensor
advantages: th.Tensor
returns: th.Tensor
action_masks: th.Tensor
class MaskableRolloutBuffer(RolloutBuffer):
"""
Rollout buffer that also stores the invalid action masks associated with each observation.
:param buffer_size: Max number of element in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param device: PyTorch device
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "auto",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
super().__init__(buffer_size, observation_space, action_space, device, gae_lambda, gamma, n_envs)
self.action_masks = None
def reset(self) -> None:
if isinstance(self.action_space, spaces.Discrete):
mask_dims = self.action_space.n
elif isinstance(self.action_space, spaces.MultiDiscrete):
mask_dims = sum(self.action_space.nvec)
elif isinstance(self.action_space, spaces.MultiBinary):
mask_dims = 2 * self.action_space.n # One mask per binary outcome
else:
raise ValueError(f"Unsupported action space {type(self.action_space)}")
self.mask_dims = mask_dims
self.action_masks = np.ones((self.buffer_size, self.n_envs, self.mask_dims), dtype=np.float32)
super().reset()
def add(self, *args, action_masks: Optional[np.ndarray] = None, **kwargs) -> None:
"""
:param action_masks: Masks applied to constrain the choice of possible actions.
"""
if action_masks is not None:
self.action_masks[self.pos] = action_masks.reshape((self.n_envs, self.mask_dims))
super().add(*args, **kwargs)
def get(self, batch_size: Optional[int] = None) -> Generator[MaskableRolloutBufferSamples, None, None]:
assert self.full, ""
indices = np.random.permutation(self.buffer_size * self.n_envs)
# Prepare the data
if not self.generator_ready:
for tensor in [
"observations",
"actions",
"values",
"log_probs",
"advantages",
"returns",
"action_masks",
]:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True
# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
yield self._get_samples(indices[start_idx : start_idx + batch_size])
start_idx += batch_size
def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> MaskableRolloutBufferSamples:
data = (
self.observations[batch_inds],
self.actions[batch_inds],
self.values[batch_inds].flatten(),
self.log_probs[batch_inds].flatten(),
self.advantages[batch_inds].flatten(),
self.returns[batch_inds].flatten(),
self.action_masks[batch_inds].reshape(-1, self.mask_dims),
)
return MaskableRolloutBufferSamples(*map(self.to_torch, data))
class MaskableDictRolloutBuffer(DictRolloutBuffer):
"""
Dict Rollout buffer used in on-policy algorithms like A2C/PPO.
Extends the RolloutBuffer to use dictionary observations
It corresponds to ``buffer_size`` transitions collected
using the current policy.
This experience will be discarded after the policy update.
In order to use PPO objective, we also store the current value of each state
and the log probability of each taken action.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
Hence, it is only involved in policy and value function training but not action selection.
:param buffer_size: Max number of element in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param device: PyTorch device
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "auto",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
self.action_masks = None
super().__init__(buffer_size, observation_space, action_space, device, gae_lambda, gamma, n_envs=n_envs)
def reset(self) -> None:
if isinstance(self.action_space, spaces.Discrete):
mask_dims = self.action_space.n
elif isinstance(self.action_space, spaces.MultiDiscrete):
mask_dims = sum(self.action_space.nvec)
elif isinstance(self.action_space, spaces.MultiBinary):
mask_dims = 2 * self.action_space.n # One mask per binary outcome
else:
raise ValueError(f"Unsupported action space {type(self.action_space)}")
self.mask_dims = mask_dims
self.action_masks = np.ones((self.buffer_size, self.n_envs, self.mask_dims), dtype=np.float32)
super().reset()
def add(self, *args, action_masks: Optional[np.ndarray] = None, **kwargs) -> None:
"""
:param action_masks: Masks applied to constrain the choice of possible actions.
"""
if action_masks is not None:
self.action_masks[self.pos] = action_masks.reshape((self.n_envs, self.mask_dims))
super().add(*args, **kwargs)
def get(self, batch_size: Optional[int] = None) -> Generator[MaskableDictRolloutBufferSamples, None, None]:
assert self.full, ""
indices = np.random.permutation(self.buffer_size * self.n_envs)
# Prepare the data
if not self.generator_ready:
for key, obs in self.observations.items():
self.observations[key] = self.swap_and_flatten(obs)
_tensor_names = ["actions", "values", "log_probs", "advantages", "returns", "action_masks"]
for tensor in _tensor_names:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True
# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
yield self._get_samples(indices[start_idx : start_idx + batch_size])
start_idx += batch_size
def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> MaskableDictRolloutBufferSamples:
return MaskableDictRolloutBufferSamples(
observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()},
actions=self.to_torch(self.actions[batch_inds]),
old_values=self.to_torch(self.values[batch_inds].flatten()),
old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),
advantages=self.to_torch(self.advantages[batch_inds].flatten()),
returns=self.to_torch(self.returns[batch_inds].flatten()),
action_masks=self.to_torch(self.action_masks[batch_inds].reshape(-1, self.mask_dims)),
)
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/maskable/buffers.py
| 0.960166 | 0.539044 |
buffers.py
|
pypi
|
import os
import numpy as np
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.vec_env import sync_envs_normalization
from sb3_contrib.common.maskable.evaluation import evaluate_policy
class MaskableEvalCallback(EvalCallback):
"""
Callback for evaluating an agent. Supports invalid action masking.
:param eval_env: The environment used for initialization
:param callback_on_new_best: Callback to trigger
when there is a new best model according to the ``mean_reward``
:param callback_after_eval: Callback to trigger after every evaluation
when there is a new best model according to the ``mean_reward``
:param n_eval_episodes: The number of episodes to test the agent
:param eval_freq: Evaluate the agent every eval_freq call of the callback.
:param log_path: Path to a folder where the evaluations (``evaluations.npz``)
will be saved. It will be updated at each evaluation.
:param best_model_save_path: Path to a folder where the best model
according to performance on the eval env will be saved.
:param deterministic: Whether the evaluation should
use a stochastic or deterministic actions.
:param render: Whether to render or not the environment during evaluation
:param verbose:
:param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been
wrapped with a Monitor wrapper)
:param use_masking: Whether to use invalid action masks during evaluation
"""
def __init__(self, *args, use_masking: bool = True, **kwargs):
super().__init__(*args, **kwargs)
self.use_masking = use_masking
def _on_step(self) -> bool:
continue_training = True
if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
# Sync training and eval env if there is VecNormalize
if self.model.get_vec_normalize_env() is not None:
try:
sync_envs_normalization(self.training_env, self.eval_env)
except AttributeError as e:
raise AssertionError(
"Training and eval env are not wrapped the same way, "
"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback "
"and warning above."
) from e
# Reset success rate buffer
self._is_success_buffer = []
# Note that evaluate_policy() has been patched to support masking
episode_rewards, episode_lengths = evaluate_policy(
self.model,
self.eval_env,
n_eval_episodes=self.n_eval_episodes,
render=self.render,
deterministic=self.deterministic,
return_episode_rewards=True,
warn=self.warn,
callback=self._log_success_callback,
use_masking=self.use_masking,
)
if self.log_path is not None:
self.evaluations_timesteps.append(self.num_timesteps)
self.evaluations_results.append(episode_rewards)
self.evaluations_length.append(episode_lengths)
kwargs = {}
# Save success log if present
if len(self._is_success_buffer) > 0:
self.evaluations_successes.append(self._is_success_buffer)
kwargs = dict(successes=self.evaluations_successes)
np.savez(
self.log_path,
timesteps=self.evaluations_timesteps,
results=self.evaluations_results,
ep_lengths=self.evaluations_length,
**kwargs,
)
mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)
mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)
self.last_mean_reward = mean_reward
if self.verbose > 0:
print(f"Eval num_timesteps={self.num_timesteps}, " f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
print(f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
# Add to current Logger
self.logger.record("eval/mean_reward", float(mean_reward))
self.logger.record("eval/mean_ep_length", mean_ep_length)
if len(self._is_success_buffer) > 0:
success_rate = np.mean(self._is_success_buffer)
if self.verbose > 0:
print(f"Success rate: {100 * success_rate:.2f}%")
self.logger.record("eval/success_rate", success_rate)
# Dump log so the evaluation results are printed with the correct timestep
self.logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard")
self.logger.dump(self.num_timesteps)
if mean_reward > self.best_mean_reward:
if self.verbose > 0:
print("New best mean reward!")
if self.best_model_save_path is not None:
self.model.save(os.path.join(self.best_model_save_path, "best_model"))
self.best_mean_reward = mean_reward
# Trigger callback on new best model, if needed
if self.callback_on_new_best is not None:
continue_training = self.callback_on_new_best.on_step()
# Trigger callback after every evaluation, if needed
if self.callback is not None:
continue_training = continue_training and self._on_event()
return continue_training
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/maskable/callbacks.py
| 0.802826 | 0.433382 |
callbacks.py
|
pypi
|
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import gymnasium as gym
import numpy as np
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, VecMonitor, is_vecenv_wrapped
from sb3_contrib.common.maskable.utils import get_action_masks, is_masking_supported
from sb3_contrib.ppo_mask import MaskablePPO
def evaluate_policy(
model: MaskablePPO,
env: Union[gym.Env, VecEnv],
n_eval_episodes: int = 10,
deterministic: bool = True,
render: bool = False,
callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,
reward_threshold: Optional[float] = None,
return_episode_rewards: bool = False,
warn: bool = True,
use_masking: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
"""
Runs policy for ``n_eval_episodes`` episodes and returns average reward.
If a vector env is passed in, this divides the episodes to evaluate onto the
different elements of the vector env. This static division of work is done to
remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
details and discussion.
.. note::
If environment has not been wrapped with ``Monitor`` wrapper, reward and
episode lengths are counted as it appears with ``env.step`` calls. If
the environment contains wrappers that modify rewards or episode lengths
(e.g. reward scaling, early episode reset), these will affect the evaluation
results as well. You can avoid this by wrapping environment with ``Monitor``
wrapper before anything else.
:param model: The RL agent you want to evaluate.
:param env: The gym environment. In the case of a ``VecEnv``
this must contain only one environment.
:param n_eval_episodes: Number of episode to evaluate the agent
:param deterministic: Whether to use deterministic or stochastic actions
:param render: Whether to render the environment or not
:param callback: callback function to do additional checks,
called after each step. Gets locals() and globals() passed as parameters.
:param reward_threshold: Minimum expected reward per episode,
this will raise an error if the performance is not met
:param return_episode_rewards: If True, a list of rewards and episde lengths
per episode will be returned instead of the mean.
:param warn: If True (default), warns user about lack of a Monitor wrapper in the
evaluation environment.
:param use_masking: Whether or not to use invalid action masks during evaluation
:return: Mean reward per episode, std of reward per episode.
Returns ([float], [int]) when ``return_episode_rewards`` is True, first
list containing per-episode rewards and second containing per-episode lengths
(in number of steps).
"""
if use_masking and not is_masking_supported(env):
raise ValueError("Environment does not support action masking. Consider using ActionMasker wrapper")
is_monitor_wrapped = False
if not isinstance(env, VecEnv):
env = DummyVecEnv([lambda: env]) # type: ignore[list-item, return-value]
is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]
if not is_monitor_wrapped and warn:
warnings.warn(
"Evaluation environment is not wrapped with a ``Monitor`` wrapper. "
"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. "
"Consider wrapping environment first with ``Monitor`` wrapper.",
UserWarning,
)
n_envs = env.num_envs
episode_rewards = []
episode_lengths = []
episode_counts = np.zeros(n_envs, dtype="int")
# Divides episodes among different sub environments in the vector as evenly as possible
episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype="int")
current_rewards = np.zeros(n_envs)
current_lengths = np.zeros(n_envs, dtype="int")
observations = env.reset()
states = None
episode_starts = np.ones((env.num_envs,), dtype=bool)
while (episode_counts < episode_count_targets).any():
if use_masking:
action_masks = get_action_masks(env)
actions, state = model.predict(
observations, # type: ignore[arg-type]
state=states,
episode_start=episode_starts,
deterministic=deterministic,
action_masks=action_masks,
)
else:
actions, states = model.predict(
observations, # type: ignore[arg-type]
state=states,
episode_start=episode_starts,
deterministic=deterministic,
)
observations, rewards, dones, infos = env.step(actions)
current_rewards += rewards
current_lengths += 1
for i in range(n_envs):
if episode_counts[i] < episode_count_targets[i]:
# unpack values so that the callback can access the local variables
reward = rewards[i]
done = dones[i]
info = infos[i]
episode_starts[i] = done
if callback is not None:
callback(locals(), globals())
if dones[i]:
if is_monitor_wrapped:
# Atari wrapper can send a "done" signal when
# the agent loses a life, but it does not correspond
# to the true end of episode
if "episode" in info.keys():
# Do not trust "done" with episode endings.
# Monitor wrapper includes "episode" key in info if environment
# has been wrapped with it. Use those rewards instead.
episode_rewards.append(info["episode"]["r"])
episode_lengths.append(info["episode"]["l"])
# Only increment at the real end of an episode
episode_counts[i] += 1
else:
episode_rewards.append(current_rewards[i])
episode_lengths.append(current_lengths[i])
episode_counts[i] += 1
current_rewards[i] = 0
current_lengths[i] = 0
if render:
env.render()
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
if reward_threshold is not None:
assert mean_reward > reward_threshold, "Mean reward below threshold: " f"{mean_reward:.2f} < {reward_threshold:.2f}"
if return_episode_rewards:
return episode_rewards, episode_lengths
return mean_reward, std_reward
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/maskable/evaluation.py
| 0.897555 | 0.58255 |
evaluation.py
|
pypi
|
from functools import partial
from typing import Callable, Generator, Optional, Tuple, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.buffers import DictRolloutBuffer, RolloutBuffer
from stable_baselines3.common.vec_env import VecNormalize
from sb3_contrib.common.recurrent.type_aliases import (
RecurrentDictRolloutBufferSamples,
RecurrentRolloutBufferSamples,
RNNStates,
)
def pad(
seq_start_indices: np.ndarray,
seq_end_indices: np.ndarray,
device: th.device,
tensor: np.ndarray,
padding_value: float = 0.0,
) -> th.Tensor:
"""
Chunk sequences and pad them to have constant dimensions.
:param seq_start_indices: Indices of the transitions that start a sequence
:param seq_end_indices: Indices of the transitions that end a sequence
:param device: PyTorch device
:param tensor: Tensor of shape (batch_size, *tensor_shape)
:param padding_value: Value used to pad sequence to the same length
(zero padding by default)
:return: (n_seq, max_length, *tensor_shape)
"""
# Create sequences given start and end
seq = [th.tensor(tensor[start : end + 1], device=device) for start, end in zip(seq_start_indices, seq_end_indices)]
return th.nn.utils.rnn.pad_sequence(seq, batch_first=True, padding_value=padding_value)
def pad_and_flatten(
seq_start_indices: np.ndarray,
seq_end_indices: np.ndarray,
device: th.device,
tensor: np.ndarray,
padding_value: float = 0.0,
) -> th.Tensor:
"""
Pad and flatten the sequences of scalar values,
while keeping the sequence order.
From (batch_size, 1) to (n_seq, max_length, 1) -> (n_seq * max_length,)
:param seq_start_indices: Indices of the transitions that start a sequence
:param seq_end_indices: Indices of the transitions that end a sequence
:param device: PyTorch device (cpu, gpu, ...)
:param tensor: Tensor of shape (max_length, n_seq, 1)
:param padding_value: Value used to pad sequence to the same length
(zero padding by default)
:return: (n_seq * max_length,) aka (padded_batch_size,)
"""
return pad(seq_start_indices, seq_end_indices, device, tensor, padding_value).flatten()
def create_sequencers(
episode_starts: np.ndarray,
env_change: np.ndarray,
device: th.device,
) -> Tuple[np.ndarray, Callable, Callable]:
"""
Create the utility function to chunk data into
sequences and pad them to create fixed size tensors.
:param episode_starts: Indices where an episode starts
:param env_change: Indices where the data collected
come from a different env (when using multiple env for data collection)
:param device: PyTorch device
:return: Indices of the transitions that start a sequence,
pad and pad_and_flatten utilities tailored for this batch
(sequence starts and ends indices are fixed)
"""
# Create sequence if env changes too
seq_start = np.logical_or(episode_starts, env_change).flatten()
# First index is always the beginning of a sequence
seq_start[0] = True
# Retrieve indices of sequence starts
seq_start_indices = np.where(seq_start == True)[0] # noqa: E712
# End of sequence are just before sequence starts
# Last index is also always end of a sequence
seq_end_indices = np.concatenate([(seq_start_indices - 1)[1:], np.array([len(episode_starts)])])
# Create padding method for this minibatch
# to avoid repeating arguments (seq_start_indices, seq_end_indices)
local_pad = partial(pad, seq_start_indices, seq_end_indices, device)
local_pad_and_flatten = partial(pad_and_flatten, seq_start_indices, seq_end_indices, device)
return seq_start_indices, local_pad, local_pad_and_flatten
class RecurrentRolloutBuffer(RolloutBuffer):
"""
Rollout buffer that also stores the LSTM cell and hidden states.
:param buffer_size: Max number of element in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param hidden_state_shape: Shape of the buffer that will collect lstm states
(n_steps, lstm.num_layers, n_envs, lstm.hidden_size)
:param device: PyTorch device
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
hidden_state_shape: Tuple[int, int, int, int],
device: Union[th.device, str] = "auto",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
self.hidden_state_shape = hidden_state_shape
self.seq_start_indices, self.seq_end_indices = None, None
super().__init__(buffer_size, observation_space, action_space, device, gae_lambda, gamma, n_envs)
def reset(self):
super().reset()
self.hidden_states_pi = np.zeros(self.hidden_state_shape, dtype=np.float32)
self.cell_states_pi = np.zeros(self.hidden_state_shape, dtype=np.float32)
self.hidden_states_vf = np.zeros(self.hidden_state_shape, dtype=np.float32)
self.cell_states_vf = np.zeros(self.hidden_state_shape, dtype=np.float32)
def add(self, *args, lstm_states: RNNStates, **kwargs) -> None:
"""
:param hidden_states: LSTM cell and hidden state
"""
self.hidden_states_pi[self.pos] = np.array(lstm_states.pi[0].cpu().numpy())
self.cell_states_pi[self.pos] = np.array(lstm_states.pi[1].cpu().numpy())
self.hidden_states_vf[self.pos] = np.array(lstm_states.vf[0].cpu().numpy())
self.cell_states_vf[self.pos] = np.array(lstm_states.vf[1].cpu().numpy())
super().add(*args, **kwargs)
def get(self, batch_size: Optional[int] = None) -> Generator[RecurrentRolloutBufferSamples, None, None]:
assert self.full, "Rollout buffer must be full before sampling from it"
# Prepare the data
if not self.generator_ready:
# hidden_state_shape = (self.n_steps, lstm.num_layers, self.n_envs, lstm.hidden_size)
# swap first to (self.n_steps, self.n_envs, lstm.num_layers, lstm.hidden_size)
for tensor in ["hidden_states_pi", "cell_states_pi", "hidden_states_vf", "cell_states_vf"]:
self.__dict__[tensor] = self.__dict__[tensor].swapaxes(1, 2)
# flatten but keep the sequence order
# 1. (n_steps, n_envs, *tensor_shape) -> (n_envs, n_steps, *tensor_shape)
# 2. (n_envs, n_steps, *tensor_shape) -> (n_envs * n_steps, *tensor_shape)
for tensor in [
"observations",
"actions",
"values",
"log_probs",
"advantages",
"returns",
"hidden_states_pi",
"cell_states_pi",
"hidden_states_vf",
"cell_states_vf",
"episode_starts",
]:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True
# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
# Sampling strategy that allows any mini batch size but requires
# more complexity and use of padding
# Trick to shuffle a bit: keep the sequence order
# but split the indices in two
split_index = np.random.randint(self.buffer_size * self.n_envs)
indices = np.arange(self.buffer_size * self.n_envs)
indices = np.concatenate((indices[split_index:], indices[:split_index]))
env_change = np.zeros(self.buffer_size * self.n_envs).reshape(self.buffer_size, self.n_envs)
# Flag first timestep as change of environment
env_change[0, :] = 1.0
env_change = self.swap_and_flatten(env_change)
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
batch_inds = indices[start_idx : start_idx + batch_size]
yield self._get_samples(batch_inds, env_change)
start_idx += batch_size
def _get_samples(
self,
batch_inds: np.ndarray,
env_change: np.ndarray,
env: Optional[VecNormalize] = None,
) -> RecurrentRolloutBufferSamples:
# Retrieve sequence starts and utility function
self.seq_start_indices, self.pad, self.pad_and_flatten = create_sequencers(
self.episode_starts[batch_inds], env_change[batch_inds], self.device
)
# Number of sequences
n_seq = len(self.seq_start_indices)
max_length = self.pad(self.actions[batch_inds]).shape[1]
padded_batch_size = n_seq * max_length
# We retrieve the lstm hidden states that will allow
# to properly initialize the LSTM at the beginning of each sequence
lstm_states_pi = (
# 1. (n_envs * n_steps, n_layers, dim) -> (batch_size, n_layers, dim)
# 2. (batch_size, n_layers, dim) -> (n_seq, n_layers, dim)
# 3. (n_seq, n_layers, dim) -> (n_layers, n_seq, dim)
self.hidden_states_pi[batch_inds][self.seq_start_indices].swapaxes(0, 1),
self.cell_states_pi[batch_inds][self.seq_start_indices].swapaxes(0, 1),
)
lstm_states_vf = (
# (n_envs * n_steps, n_layers, dim) -> (n_layers, n_seq, dim)
self.hidden_states_vf[batch_inds][self.seq_start_indices].swapaxes(0, 1),
self.cell_states_vf[batch_inds][self.seq_start_indices].swapaxes(0, 1),
)
lstm_states_pi = (self.to_torch(lstm_states_pi[0]).contiguous(), self.to_torch(lstm_states_pi[1]).contiguous())
lstm_states_vf = (self.to_torch(lstm_states_vf[0]).contiguous(), self.to_torch(lstm_states_vf[1]).contiguous())
return RecurrentRolloutBufferSamples(
# (batch_size, obs_dim) -> (n_seq, max_length, obs_dim) -> (n_seq * max_length, obs_dim)
observations=self.pad(self.observations[batch_inds]).reshape((padded_batch_size, *self.obs_shape)),
actions=self.pad(self.actions[batch_inds]).reshape((padded_batch_size,) + self.actions.shape[1:]),
old_values=self.pad_and_flatten(self.values[batch_inds]),
old_log_prob=self.pad_and_flatten(self.log_probs[batch_inds]),
advantages=self.pad_and_flatten(self.advantages[batch_inds]),
returns=self.pad_and_flatten(self.returns[batch_inds]),
lstm_states=RNNStates(lstm_states_pi, lstm_states_vf),
episode_starts=self.pad_and_flatten(self.episode_starts[batch_inds]),
mask=self.pad_and_flatten(np.ones_like(self.returns[batch_inds])),
)
class RecurrentDictRolloutBuffer(DictRolloutBuffer):
"""
Dict Rollout buffer used in on-policy algorithms like A2C/PPO.
Extends the RecurrentRolloutBuffer to use dictionary observations
:param buffer_size: Max number of element in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param hidden_state_shape: Shape of the buffer that will collect lstm states
:param device: PyTorch device
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
hidden_state_shape: Tuple[int, int, int, int],
device: Union[th.device, str] = "auto",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
self.hidden_state_shape = hidden_state_shape
self.seq_start_indices, self.seq_end_indices = None, None
super().__init__(buffer_size, observation_space, action_space, device, gae_lambda, gamma, n_envs=n_envs)
def reset(self):
super().reset()
self.hidden_states_pi = np.zeros(self.hidden_state_shape, dtype=np.float32)
self.cell_states_pi = np.zeros(self.hidden_state_shape, dtype=np.float32)
self.hidden_states_vf = np.zeros(self.hidden_state_shape, dtype=np.float32)
self.cell_states_vf = np.zeros(self.hidden_state_shape, dtype=np.float32)
def add(self, *args, lstm_states: RNNStates, **kwargs) -> None:
"""
:param hidden_states: LSTM cell and hidden state
"""
self.hidden_states_pi[self.pos] = np.array(lstm_states.pi[0].cpu().numpy())
self.cell_states_pi[self.pos] = np.array(lstm_states.pi[1].cpu().numpy())
self.hidden_states_vf[self.pos] = np.array(lstm_states.vf[0].cpu().numpy())
self.cell_states_vf[self.pos] = np.array(lstm_states.vf[1].cpu().numpy())
super().add(*args, **kwargs)
def get(self, batch_size: Optional[int] = None) -> Generator[RecurrentDictRolloutBufferSamples, None, None]:
assert self.full, "Rollout buffer must be full before sampling from it"
# Prepare the data
if not self.generator_ready:
# hidden_state_shape = (self.n_steps, lstm.num_layers, self.n_envs, lstm.hidden_size)
# swap first to (self.n_steps, self.n_envs, lstm.num_layers, lstm.hidden_size)
for tensor in ["hidden_states_pi", "cell_states_pi", "hidden_states_vf", "cell_states_vf"]:
self.__dict__[tensor] = self.__dict__[tensor].swapaxes(1, 2)
for key, obs in self.observations.items():
self.observations[key] = self.swap_and_flatten(obs)
for tensor in [
"actions",
"values",
"log_probs",
"advantages",
"returns",
"hidden_states_pi",
"cell_states_pi",
"hidden_states_vf",
"cell_states_vf",
"episode_starts",
]:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True
# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
# Trick to shuffle a bit: keep the sequence order
# but split the indices in two
split_index = np.random.randint(self.buffer_size * self.n_envs)
indices = np.arange(self.buffer_size * self.n_envs)
indices = np.concatenate((indices[split_index:], indices[:split_index]))
env_change = np.zeros(self.buffer_size * self.n_envs).reshape(self.buffer_size, self.n_envs)
# Flag first timestep as change of environment
env_change[0, :] = 1.0
env_change = self.swap_and_flatten(env_change)
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
batch_inds = indices[start_idx : start_idx + batch_size]
yield self._get_samples(batch_inds, env_change)
start_idx += batch_size
def _get_samples(
self,
batch_inds: np.ndarray,
env_change: np.ndarray,
env: Optional[VecNormalize] = None,
) -> RecurrentDictRolloutBufferSamples:
# Retrieve sequence starts and utility function
self.seq_start_indices, self.pad, self.pad_and_flatten = create_sequencers(
self.episode_starts[batch_inds], env_change[batch_inds], self.device
)
n_seq = len(self.seq_start_indices)
max_length = self.pad(self.actions[batch_inds]).shape[1]
padded_batch_size = n_seq * max_length
# We retrieve the lstm hidden states that will allow
# to properly initialize the LSTM at the beginning of each sequence
lstm_states_pi = (
# (n_envs * n_steps, n_layers, dim) -> (n_layers, n_seq, dim)
self.hidden_states_pi[batch_inds][self.seq_start_indices].swapaxes(0, 1),
self.cell_states_pi[batch_inds][self.seq_start_indices].swapaxes(0, 1),
)
lstm_states_vf = (
# (n_envs * n_steps, n_layers, dim) -> (n_layers, n_seq, dim)
self.hidden_states_vf[batch_inds][self.seq_start_indices].swapaxes(0, 1),
self.cell_states_vf[batch_inds][self.seq_start_indices].swapaxes(0, 1),
)
lstm_states_pi = (self.to_torch(lstm_states_pi[0]).contiguous(), self.to_torch(lstm_states_pi[1]).contiguous())
lstm_states_vf = (self.to_torch(lstm_states_vf[0]).contiguous(), self.to_torch(lstm_states_vf[1]).contiguous())
observations = {key: self.pad(obs[batch_inds]) for (key, obs) in self.observations.items()}
observations = {key: obs.reshape((padded_batch_size,) + self.obs_shape[key]) for (key, obs) in observations.items()}
return RecurrentDictRolloutBufferSamples(
observations=observations,
actions=self.pad(self.actions[batch_inds]).reshape((padded_batch_size,) + self.actions.shape[1:]),
old_values=self.pad_and_flatten(self.values[batch_inds]),
old_log_prob=self.pad_and_flatten(self.log_probs[batch_inds]),
advantages=self.pad_and_flatten(self.advantages[batch_inds]),
returns=self.pad_and_flatten(self.returns[batch_inds]),
lstm_states=RNNStates(lstm_states_pi, lstm_states_vf),
episode_starts=self.pad_and_flatten(self.episode_starts[batch_inds]),
mask=self.pad_and_flatten(np.ones_like(self.returns[batch_inds])),
)
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/recurrent/buffers.py
| 0.964288 | 0.517388 |
buffers.py
|
pypi
|
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.distributions import Distribution
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
)
from stable_baselines3.common.type_aliases import Schedule
from stable_baselines3.common.utils import zip_strict
from torch import nn
from sb3_contrib.common.recurrent.type_aliases import RNNStates
class RecurrentActorCriticPolicy(ActorCriticPolicy):
"""
Recurrent policy class for actor-critic algorithms (has both policy and value prediction).
To be used with A2C, PPO and the likes.
It assumes that both the actor and the critic LSTM
have the same architecture.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param share_features_extractor: If True, the features extractor is shared between the policy and value networks.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
:param lstm_hidden_size: Number of hidden units for each LSTM layer.
:param n_lstm_layers: Number of LSTM layers.
:param shared_lstm: Whether the LSTM is shared between the actor and the critic
(in that case, only the actor gradient is used)
By default, the actor and the critic have two separate LSTM.
:param enable_critic_lstm: Use a seperate LSTM for the critic.
:param lstm_kwargs: Additional keyword arguments to pass the the LSTM
constructor.
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
share_features_extractor: bool = True,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
lstm_hidden_size: int = 256,
n_lstm_layers: int = 1,
shared_lstm: bool = False,
enable_critic_lstm: bool = True,
lstm_kwargs: Optional[Dict[str, Any]] = None,
):
self.lstm_output_dim = lstm_hidden_size
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
share_features_extractor,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
self.lstm_kwargs = lstm_kwargs or {}
self.shared_lstm = shared_lstm
self.enable_critic_lstm = enable_critic_lstm
self.lstm_actor = nn.LSTM(
self.features_dim,
lstm_hidden_size,
num_layers=n_lstm_layers,
**self.lstm_kwargs,
)
# For the predict() method, to initialize hidden states
# (n_lstm_layers, batch_size, lstm_hidden_size)
self.lstm_hidden_state_shape = (n_lstm_layers, 1, lstm_hidden_size)
self.critic = None
self.lstm_critic = None
assert not (
self.shared_lstm and self.enable_critic_lstm
), "You must choose between shared LSTM, seperate or no LSTM for the critic."
assert not (
self.shared_lstm and not self.share_features_extractor
), "If the features extractor is not shared, the LSTM cannot be shared."
# No LSTM for the critic, we still need to convert
# output of features extractor to the correct size
# (size of the output of the actor lstm)
if not (self.shared_lstm or self.enable_critic_lstm):
self.critic = nn.Linear(self.features_dim, lstm_hidden_size)
# Use a separate LSTM for the critic
if self.enable_critic_lstm:
self.lstm_critic = nn.LSTM(
self.features_dim,
lstm_hidden_size,
num_layers=n_lstm_layers,
**self.lstm_kwargs,
)
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
self.mlp_extractor = MlpExtractor(
self.lstm_output_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
@staticmethod
def _process_sequence(
features: th.Tensor,
lstm_states: Tuple[th.Tensor, th.Tensor],
episode_starts: th.Tensor,
lstm: nn.LSTM,
) -> Tuple[th.Tensor, th.Tensor]:
"""
Do a forward pass in the LSTM network.
:param features: Input tensor
:param lstm_states: previous cell and hidden states of the LSTM
:param episode_starts: Indicates when a new episode starts,
in that case, we need to reset LSTM states.
:param lstm: LSTM object.
:return: LSTM output and updated LSTM states.
"""
# LSTM logic
# (sequence length, batch size, features dim)
# (batch size = n_envs for data collection or n_seq when doing gradient update)
n_seq = lstm_states[0].shape[1]
# Batch to sequence
# (padded batch size, features_dim) -> (n_seq, max length, features_dim) -> (max length, n_seq, features_dim)
# note: max length (max sequence length) is always 1 during data collection
features_sequence = features.reshape((n_seq, -1, lstm.input_size)).swapaxes(0, 1)
episode_starts = episode_starts.reshape((n_seq, -1)).swapaxes(0, 1)
# If we don't have to reset the state in the middle of a sequence
# we can avoid the for loop, which speeds up things
if th.all(episode_starts == 0.0):
lstm_output, lstm_states = lstm(features_sequence, lstm_states)
lstm_output = th.flatten(lstm_output.transpose(0, 1), start_dim=0, end_dim=1)
return lstm_output, lstm_states
lstm_output = []
# Iterate over the sequence
for features, episode_start in zip_strict(features_sequence, episode_starts):
hidden, lstm_states = lstm(
features.unsqueeze(dim=0),
(
# Reset the states at the beginning of a new episode
(1.0 - episode_start).view(1, n_seq, 1) * lstm_states[0],
(1.0 - episode_start).view(1, n_seq, 1) * lstm_states[1],
),
)
lstm_output += [hidden]
# Sequence to batch
# (sequence length, n_seq, lstm_out_dim) -> (batch_size, lstm_out_dim)
lstm_output = th.flatten(th.cat(lstm_output).transpose(0, 1), start_dim=0, end_dim=1)
return lstm_output, lstm_states
def forward(
self,
obs: th.Tensor,
lstm_states: RNNStates,
episode_starts: th.Tensor,
deterministic: bool = False,
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, RNNStates]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation. Observation
:param lstm_states: The last hidden and memory states for the LSTM.
:param episode_starts: Whether the observations correspond to new episodes
or not (we reset the lstm states in that case).
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
if self.share_features_extractor:
pi_features = vf_features = features # alis
else:
pi_features, vf_features = features
# latent_pi, latent_vf = self.mlp_extractor(features)
latent_pi, lstm_states_pi = self._process_sequence(pi_features, lstm_states.pi, episode_starts, self.lstm_actor)
if self.lstm_critic is not None:
latent_vf, lstm_states_vf = self._process_sequence(vf_features, lstm_states.vf, episode_starts, self.lstm_critic)
elif self.shared_lstm:
# Re-use LSTM features but do not backpropagate
latent_vf = latent_pi.detach()
lstm_states_vf = (lstm_states_pi[0].detach(), lstm_states_pi[1].detach())
else:
# Critic only has a feedforward network
latent_vf = self.critic(vf_features)
lstm_states_vf = lstm_states_pi
latent_pi = self.mlp_extractor.forward_actor(latent_pi)
latent_vf = self.mlp_extractor.forward_critic(latent_vf)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob, RNNStates(lstm_states_pi, lstm_states_vf)
def get_distribution(
self,
obs: th.Tensor,
lstm_states: Tuple[th.Tensor, th.Tensor],
episode_starts: th.Tensor,
) -> Tuple[Distribution, Tuple[th.Tensor, ...]]:
"""
Get the current policy distribution given the observations.
:param obs: Observation.
:param lstm_states: The last hidden and memory states for the LSTM.
:param episode_starts: Whether the observations correspond to new episodes
or not (we reset the lstm states in that case).
:return: the action distribution and new hidden states.
"""
# Call the method from the parent of the parent class
features = super(ActorCriticPolicy, self).extract_features(obs, self.pi_features_extractor)
latent_pi, lstm_states = self._process_sequence(features, lstm_states, episode_starts, self.lstm_actor)
latent_pi = self.mlp_extractor.forward_actor(latent_pi)
return self._get_action_dist_from_latent(latent_pi), lstm_states
def predict_values(
self,
obs: th.Tensor,
lstm_states: Tuple[th.Tensor, th.Tensor],
episode_starts: th.Tensor,
) -> th.Tensor:
"""
Get the estimated values according to the current policy given the observations.
:param obs: Observation.
:param lstm_states: The last hidden and memory states for the LSTM.
:param episode_starts: Whether the observations correspond to new episodes
or not (we reset the lstm states in that case).
:return: the estimated values.
"""
# Call the method from the parent of the parent class
features = super(ActorCriticPolicy, self).extract_features(obs, self.vf_features_extractor)
if self.lstm_critic is not None:
latent_vf, lstm_states_vf = self._process_sequence(features, lstm_states, episode_starts, self.lstm_critic)
elif self.shared_lstm:
# Use LSTM from the actor
latent_pi, _ = self._process_sequence(features, lstm_states, episode_starts, self.lstm_actor)
latent_vf = latent_pi.detach()
else:
latent_vf = self.critic(features)
latent_vf = self.mlp_extractor.forward_critic(latent_vf)
return self.value_net(latent_vf)
def evaluate_actions(
self, obs: th.Tensor, actions: th.Tensor, lstm_states: RNNStates, episode_starts: th.Tensor
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs: Observation.
:param actions:
:param lstm_states: The last hidden and memory states for the LSTM.
:param episode_starts: Whether the observations correspond to new episodes
or not (we reset the lstm states in that case).
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
if self.share_features_extractor:
pi_features = vf_features = features # alias
else:
pi_features, vf_features = features
latent_pi, _ = self._process_sequence(pi_features, lstm_states.pi, episode_starts, self.lstm_actor)
if self.lstm_critic is not None:
latent_vf, _ = self._process_sequence(vf_features, lstm_states.vf, episode_starts, self.lstm_critic)
elif self.shared_lstm:
latent_vf = latent_pi.detach()
else:
latent_vf = self.critic(vf_features)
latent_pi = self.mlp_extractor.forward_actor(latent_pi)
latent_vf = self.mlp_extractor.forward_critic(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def _predict(
self,
observation: th.Tensor,
lstm_states: Tuple[th.Tensor, th.Tensor],
episode_starts: th.Tensor,
deterministic: bool = False,
) -> Tuple[th.Tensor, Tuple[th.Tensor, ...]]:
"""
Get the action according to the policy for a given observation.
:param observation:
:param lstm_states: The last hidden and memory states for the LSTM.
:param episode_starts: Whether the observations correspond to new episodes
or not (we reset the lstm states in that case).
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy and hidden states of the RNN
"""
distribution, lstm_states = self.get_distribution(observation, lstm_states, episode_starts)
return distribution.get_actions(deterministic=deterministic), lstm_states
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param lstm_states: The last hidden and memory states for the LSTM.
:param episode_starts: Whether the observations correspond to new episodes
or not (we reset the lstm states in that case).
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next hidden state
(used in recurrent policies)
"""
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
if isinstance(observation, dict):
n_envs = observation[next(iter(observation.keys()))].shape[0]
else:
n_envs = observation.shape[0]
# state : (n_layers, n_envs, dim)
if state is None:
# Initialize hidden states to zeros
state = np.concatenate([np.zeros(self.lstm_hidden_state_shape) for _ in range(n_envs)], axis=1)
state = (state, state)
if episode_start is None:
episode_start = np.array([False for _ in range(n_envs)])
with th.no_grad():
# Convert to PyTorch tensors
states = th.tensor(state[0], dtype=th.float32, device=self.device), th.tensor(
state[1], dtype=th.float32, device=self.device
)
episode_starts = th.tensor(episode_start, dtype=th.float32, device=self.device)
actions, states = self._predict(
observation, lstm_states=states, episode_starts=episode_starts, deterministic=deterministic
)
states = (states[0].cpu().numpy(), states[1].cpu().numpy())
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
# Remove batch dimension if needed
if not vectorized_env:
actions = actions.squeeze(axis=0)
return actions, states
class RecurrentActorCriticCnnPolicy(RecurrentActorCriticPolicy):
"""
CNN recurrent policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param share_features_extractor: If True, the features extractor is shared between the policy and value networks.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
:param lstm_hidden_size: Number of hidden units for each LSTM layer.
:param n_lstm_layers: Number of LSTM layers.
:param shared_lstm: Whether the LSTM is shared between the actor and the critic.
By default, only the actor has a recurrent network.
:param enable_critic_lstm: Use a seperate LSTM for the critic.
:param lstm_kwargs: Additional keyword arguments to pass the the LSTM
constructor.
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
share_features_extractor: bool = True,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
lstm_hidden_size: int = 256,
n_lstm_layers: int = 1,
shared_lstm: bool = False,
enable_critic_lstm: bool = True,
lstm_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
share_features_extractor,
normalize_images,
optimizer_class,
optimizer_kwargs,
lstm_hidden_size,
n_lstm_layers,
shared_lstm,
enable_critic_lstm,
lstm_kwargs,
)
class RecurrentMultiInputActorCriticPolicy(RecurrentActorCriticPolicy):
"""
MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param share_features_extractor: If True, the features extractor is shared between the policy and value networks.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
:param lstm_hidden_size: Number of hidden units for each LSTM layer.
:param n_lstm_layers: Number of LSTM layers.
:param shared_lstm: Whether the LSTM is shared between the actor and the critic.
By default, only the actor has a recurrent network.
:param enable_critic_lstm: Use a seperate LSTM for the critic.
:param lstm_kwargs: Additional keyword arguments to pass the the LSTM
constructor.
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
share_features_extractor: bool = True,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
lstm_hidden_size: int = 256,
n_lstm_layers: int = 1,
shared_lstm: bool = False,
enable_critic_lstm: bool = True,
lstm_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
share_features_extractor,
normalize_images,
optimizer_class,
optimizer_kwargs,
lstm_hidden_size,
n_lstm_layers,
shared_lstm,
enable_critic_lstm,
lstm_kwargs,
)
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/recurrent/policies.py
| 0.964153 | 0.599397 |
policies.py
|
pypi
|
from typing import List, Optional
import numpy as np
from gymnasium import spaces
from stable_baselines3.common.envs import IdentityEnv
class InvalidActionEnvDiscrete(IdentityEnv[int]):
"""
Identity env with a discrete action space. Supports action masking.
"""
def __init__(
self,
dim: Optional[int] = None,
ep_length: int = 100,
n_invalid_actions: int = 0,
):
if dim is None:
dim = 1
assert n_invalid_actions < dim, f"Too many invalid actions: {n_invalid_actions} < {dim}"
space = spaces.Discrete(dim)
self.n_invalid_actions = n_invalid_actions
self.possible_actions = np.arange(space.n)
self.invalid_actions: List[int] = []
super().__init__(space=space, ep_length=ep_length)
def _choose_next_state(self) -> None:
self.state = self.action_space.sample()
# Randomly choose invalid actions that are not the current state
potential_invalid_actions = [i for i in self.possible_actions if i != self.state]
self.invalid_actions = np.random.choice(potential_invalid_actions, self.n_invalid_actions, replace=False).tolist()
def action_masks(self) -> List[bool]:
return [action not in self.invalid_actions for action in self.possible_actions]
class InvalidActionEnvMultiDiscrete(IdentityEnv[np.ndarray]):
"""
Identity env with a multidiscrete action space. Supports action masking.
"""
action_space: spaces.MultiDiscrete
def __init__(
self,
dims: Optional[List[int]] = None,
ep_length: int = 100,
n_invalid_actions: int = 0,
):
if dims is None:
dims = [1, 1]
if n_invalid_actions > sum(dims) - len(dims):
raise ValueError(f"Cannot find a valid action for each dim. Set n_invalid_actions <= {sum(dims) - len(dims)}")
space = spaces.MultiDiscrete(dims)
self.n_invalid_actions = n_invalid_actions
self.possible_actions = np.arange(sum(dims))
self.invalid_actions: List[int] = []
super().__init__(space=space, ep_length=ep_length)
def _choose_next_state(self) -> None:
self.state = self.action_space.sample()
converted_state: List[int] = []
running_total = 0
for i in range(len(self.action_space.nvec)):
converted_state.append(running_total + self.state[i])
running_total += self.action_space.nvec[i]
# Randomly choose invalid actions that are not the current state
potential_invalid_actions = [i for i in self.possible_actions if i not in converted_state]
self.invalid_actions = np.random.choice(potential_invalid_actions, self.n_invalid_actions, replace=False).tolist()
def action_masks(self) -> List[bool]:
return [action not in self.invalid_actions for action in self.possible_actions]
class InvalidActionEnvMultiBinary(IdentityEnv[np.ndarray]):
"""
Identity env with a multibinary action space. Supports action masking.
"""
def __init__(
self,
dims: Optional[int] = None,
ep_length: int = 100,
n_invalid_actions: int = 0,
):
if dims is None:
dims = 1
if n_invalid_actions > dims:
raise ValueError(f"Cannot find a valid action for each dim. Set n_invalid_actions <= {dims}")
space = spaces.MultiBinary(dims)
self.n_dims = dims
self.n_invalid_actions = n_invalid_actions
self.possible_actions = np.arange(2 * dims)
self.invalid_actions: List[int] = []
super().__init__(space=space, ep_length=ep_length)
def _choose_next_state(self) -> None:
self.state = self.action_space.sample()
converted_state: List[int] = []
running_total = 0
for i in range(self.n_dims):
converted_state.append(running_total + self.state[i])
running_total += 2
# Randomly choose invalid actions that are not the current state
potential_invalid_actions = [i for i in self.possible_actions if i not in converted_state]
self.invalid_actions = np.random.choice(potential_invalid_actions, self.n_invalid_actions, replace=False).tolist()
def action_masks(self) -> List[bool]:
return [action not in self.invalid_actions for action in self.possible_actions]
|
/sb3_contrib-2.1.0.tar.gz/sb3_contrib-2.1.0/sb3_contrib/common/envs/invalid_actions_env.py
| 0.895657 | 0.516595 |
invalid_actions_env.py
|
pypi
|
# SbNative
An extention to python for debugging and such. Things that, well `Should Be Native`
## SbNative may be used for
- private projects.
- public projects (The SbNative repo must be referenced in a Readme in case the source code is available.)
DO NOT DISTRIBUTE.
ALL NON MENTIONED RIGHTS RESERVED.
## Chapter 1: debugging
All of the necessary dependencies are located or imported in the `debugtools.py` file.
- `switchTerminalStacking`. Terminal stacking is a method for compressing logged information into a single line if possible.
Ex:
```python
from sbNative.debugtools import log
for _ in range(10):
log("this module should be native!")
```
leads to this result when not using terminal stacking:
```
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
LOG: this module should be native! --> c:FILEPATH.py:5
```
which obviously is not very clean...
Instead, doing this:
```python
from sbNative.debugtools import log,switchTerminalStacking
switchTerminalStacking()
for _ in range(10):
log("this module should be native!")
```
leads to an arguably cleaner output:
```
LOG: this module should be native! --> c:FILEPATH.py:7 [10x]
```
- `log`. Prints all the arguments given to
the console and the file + line of the call.
Supports more advanced logging when paired with the `cleanRepr` class decorator.
As shown above, it also claryfies that a value has been logged. Having a line at the end helps finding the log call and editing/removing it quickly. In many editors, (tested in VSCODE) you may CTRL+LEFTCLICK the line and it will redirect you to the file and corresponding line of the call.
Ex:
```
LOG: this module should be native! --> c:/---/Desktop/test1.py:6
```
The depth parameter controls how far the lookup goes into the callstack returning the filename and number after the `-->`. This is a feature for functions written by you, to redirect the user or yourself to the line **your** function was called at. Incrementing goes further into the callstack. Default: 2.
- `ilog`. "Info Log". Behaves mainly like `log`
Only difference: the first argument will be used to represent what is being logged.
- `isFromCall`. Gets if a function with the name `funcName` is in the callstack.
Used by `__clsRepr` to determine if it should add markers in the form of `lignSplitSign` where newlines can be added if the logging string is too long.
- `cleanRepr`. A decorator which makes the representation of your class as clean as possible. If you don't want specific class or instance variables to be included, you may specify their name as arguments for this function.
- `getTerminalOutputs`. Returns the terminal output content recorded while the function was running, and the result from the function in a tuple.
(TerminalOutput,FunctionResult)
<span style="color:red">***WARNING: THIS FUNCTION ALLOCATES THE RESULT TO YOUR DRIVE AND NOT MEMORY. PRINTING MAY BE VERY SLOW, DO NOT EVER USE IN PRODUCTION WITH HIGH WORKLOADS.***</span>
- `timer`. A simple decorator for timing the
execution time of a function or method.
Brags the `ilog` function. (:
- `tPlotArgs` Enums or "Flags" to sort after the execution times of the functions or the arguments passed to the function.
- `timePlotter` Works the same way as the `timer` decorator, tho it returns an object and the decorator is the function `timePlotter.time`.
The major difference is the ability to plot the times on a matplotlib graph. You can sort the time or arguments with the Enums from `tPlotArgs`.
The reverse kwarg may only reverse the x axis.
The arguments or keyarguments that are supposed to be displayed on the plot have to be passed into the `trackArgs`/`trackKwargs` parameters. For args, these have to be the indicies of the argument, for kwargs the name of the keyword-argument.
Decorate the function to be tracked with the `timer` method, and plot them with the `show` one.
You may not use the same instance on multiple functions, otherwise, an error will be raised.
## Chapter 2: runtime utilities
All of the neccessary dependencies are located or imported in the `runtimetools.py` file.
- `getPath` Retrieves the path of the file it has been called in. Returns a `Path` object from the built-in `pathlib` module.
- `globaliseAllSubitems` Adds all the subitems of a module or folder containing a `__init__.py` file to the global scope, do not ever use this function if you are not desperate, the IDE wont recognise its behaviour.
- `execWithExcTb` Extends the built-in `exec` function, tho shows the exceptions when one is raised, with the appropriate format.
- `runAndCast` <span style="color:red">***NOT IMPLEMENTED COMPLETELY YET.***</span>
- `safeIter` Allows iteration and removal of items inside the iterable simultaneously.
- `bidirectionalDict` One may get the original key by the values, like in {"Richard":["Rick","Dick"]}
Using indexing or attribute getter with "Richard", "Rick" or "Dick" here will return "Richard"
When a value is given and whilst not contained in the dict, a KeyError will be risen.
Full Ex:
```python
d = runtimetools.BiDirectionaldict(
Richard = ["Dick", "Rick"],
Annamarie = ["Marie", "Anna", "Ann"]
)
print(d.Richard, d["Richard"])
print(d.Rick, d["Rick"])
print(d.Dick, d["Dick"])
print(d.Annamarie, d["Annamarie"])
print(d.Marie, d["Marie"])
print(d.Anna, d["Anna"])
print(d.Ann, d["Ann"])
```
- `LanguageFormatter` Used to format information from a program readable structure to a more easily human readable format. All of these methods are static.
- `enumerateCollection` Takes a collection like a list, tuple or anything else with a join method and converts the contents into a human readable enumeration.
- `toAbbrNumber` Abbriviates an Integer or float dynamically, using k; m; b; t, by default, which can be changed accordingly to the language unsing the abbriviations kw. The maxPrecisionAmt kw indicates the amount of digits of the output precision.
- `AbbrNumToFloat` The exact counterpart to ***toAbbrNumber***
WATCH OUT FOR DIFFERENCES IN THE `abbriviations` VARIABLE
|
/sbNative-0.0.15.tar.gz/sbNative-0.0.15/README.md
| 0.85341 | 0.853608 |
README.md
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sbad_distributions-0.1.tar.gz/sbad_distributions-0.1/sbad_distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
Sideband <img align="right" src="https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-lightgrey.svg"/>
=========
Sideband is an LXMF client for Android, Linux and macOS. It allows you to communicate with other people or LXMF-compatible systems over Reticulum networks using LoRa, Packet Radio, WiFi, I2P, Encrypted QR Paper Messages, or anything else Reticulum supports.

Sideband is completely free, end-to-end encrypted, permission-less, anonymous and infrastructure-less. Sideband uses the peer-to-peer and distributed messaging system [LXMF](https://github.com/markqvist/lxmf "LXMF"). There is no sign-up, no service providers, no "end-user license agreements", no data theft and no surveillance. You own the system.
This also means that Sideband operates differently than what you might be used to. It does not need a connection to a server on the Internet to function, and you do not have an account anywhere. Please read the Guide section included in the program, to get an understanding of how Sideband differs from other messaging systems.
The program currently includes basic functionality for secure and independent communication, and many useful features are planned for implementation. Sideband is currently released as a beta version. Please help make all the functionality a reality by supporting the development with donations.
Sideband works well with the terminal-based LXMF client [Nomad Network](https://github.com/markqvist/nomadnet), which allows you to easily host Propagation Nodes for your LXMF network, and more.
If you want to help develop this program, get in touch.
## Installation
For your Android devices, download an [APK on the latest release](https://github.com/markqvist/Sideband/releases/latest) page. If you prefer to install via F-Droid, you can add the [IzzyOnDroid Repository](https://android.izzysoft.de/repo/info) to your F-Droid client, which includes Sideband.
A DMG file containing a macOS app bundle is also available on the [latest release](https://github.com/markqvist/Sideband/releases/latest) page.
Aditionally, you can install Sideband with ``pip`` on Linux and macOS:
```bash
# Install Sideband and dependencies on Linux
pip install sbapp
# Install Sideband and dependencies on macOS
pip install "sbapp[macos]"
# Run it
sideband
```
## Example Paper Message
You can try out the paper messaging functionality by using the following QR-code. It is a paper message sent to the LXMF address `6b3362bd2c1dbf87b66a85f79a8d8c75`. To be able to decrypt and read the message, you will need to import the following base32-encoded Reticulum Identity into the app:
`3BPTDTQCRZPKJT3TXAJCMQFMOYWIM3OCLKPWMG4HCF2T4CH3YZHVNHNRDU6QAZWV2KBHMWBNT2C62TQEVC5GLFM4MN25VLZFSK3ADRQ=`
You can import the identity into Sideband in the **Encryption Keys** part of the program. After the you have imported the identity, you can scan the following QR-code and open it in the app, where it will be decrypted and added to your messages.
<p align="center"><img width="50%" src="https://raw.githubusercontent.com/markqvist/LXMF/master/docs/paper_msg_test.png"/></p>
You can also find the entire message in <a href="lxm://azNivSwdv4e2aoX3mo2MdTAozuI7BlzrLlHULmnVgpz3dNT9CMPVwgywzCJP8FVogj5j_kU7j7ywuvBNcr45kRTrd19c3iHenmnSDe4VEd6FuGsAiT0Khzl7T81YZHPTDhRNp0FdhDE9AJ7uphw7zKMyqhHHxOxqrYeBeKF66gpPxDceqjsOApvsSwggjcuHBx9OxOBy05XmnJxA1unCKgvNfOFYc1T47luxoY3c0dLOJnJPwZuFRytx2TXlQNZzOJ28yTEygIfkDqEO9mZi5lgev7XZJ0DvgioQxMIyoCm7lBUzfq66zW3SQj6vHHph7bhr36dLOCFgk4fZA6yia2MlTT9KV66Tn2l8mPNDlvuSAJhwDA_xx2PN9zKadCjo9sItkAp8r-Ss1CzoUWZUAyT1oDw7ly6RrzGBG-e3eM3CL6u1juIeFiHby7_3cON-6VTUuk4xR5nwKlFTu5vsYMVXe5H3VahiDSS4Q1aqX7I">this link</a>:
`lxm://azNivSwdv4e2aoX3mo2MdTAozuI7BlzrLlHULmnVgpz3dNT9CMPVwgywzCJP8FVogj5j_kU7j7ywuvBNcr45kRTrd19c3iHenmnSDe4VEd6FuGsAiT0Khzl7T81YZHPTDhRNp0FdhDE9AJ7uphw7zKMyqhHHxOxqrYeBeKF66gpPxDceqjsOApvsSwggjcuHBx9OxOBy05XmnJxA1unCKgvNfOFYc1T47luxoY3c0dLOJnJPwZuFRytx2TXlQNZzOJ28yTEygIfkDqEO9mZi5lgev7XZJ0DvgioQxMIyoCm7lBUzfq66zW3SQj6vHHph7bhr36dLOCFgk4fZA6yia2MlTT9KV66Tn2l8mPNDlvuSAJhwDA_xx2PN9zKadCjo9sItkAp8r-Ss1CzoUWZUAyT1oDw7ly6RrzGBG-e3eM3CL6u1juIeFiHby7_3cON-6VTUuk4xR5nwKlFTu5vsYMVXe5H3VahiDSS4Q1aqX7I`
On operating systems that allow for registering custom URI-handlers, you can click the link, and it will be decoded directly in your LXMF client. This works with Sideband on Android.
## Support Sideband Development
You can help support the continued development of open, free and private communications systems by donating via one of the following channels:
- Monero:
```
84FpY1QbxHcgdseePYNmhTHcrgMX4nFfBYtz2GKYToqHVVhJp8Eaw1Z1EedRnKD19b3B8NiLCGVxzKV17UMmmeEsCrPyA5w
```
- Ethereum
```
0x81F7B979fEa6134bA9FD5c701b3501A2e61E897a
```
- Bitcoin
```
3CPmacGm34qYvR6XWLVEJmi2aNe3PZqUuq
```
- Ko-Fi: https://ko-fi.com/markqvist
<br/>
## Development Roadmap
- Adding a Nomad Net page browser
- Implementing the Local Broadcasts feature
- Adding a debug log option and viewer
- Adding a Linux .desktop file
- Message sorting mechanism
- Fix I2P status not being displayed correctly when the I2P router disappears unexpectedly
- Adding LXMF sneakernet and paper message functionality
## License
Unless otherwise noted, this work is licensed under a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License][cc-by-nc-sa].
Permission is hereby granted to use Sideband in binary form, for any and all purposes, and to freely distribute binary copies of the program, so long as no payment or compensation is charged or received for such distribution or use.
<img src="https://licensebuttons.net/l/by-nc-sa/4.0/88x31.png" align="right">
[cc-by-nc-sa]: http://creativecommons.org/licenses/by-nc-sa/4.0/
[cc-by-nc-sa-image]: https://licensebuttons.net/l/by-nc-sa/4.0/88x31.png
[cc-by-nc-sa-shield]: https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-lightgrey.svg
*Device screenshots generated with [deviceframes](https://deviceframes.com). Thanks!*
|
/sbapp-0.5.2.tar.gz/sbapp-0.5.2/README.md
| 0.499756 | 0.866472 |
README.md
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sbbbxl_distrib-0.1.tar.gz/sbbbxl_distrib-0.1/sbbbxl_distrib/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
sbbst (Self Balancing Binary Search Tree)
-----------------------------------------
A Python implementation of a self balancing binary search tree (AVL Tree). Useful to practice, study and see how a SBBST works.
Introduction
============
A **self-balancing binary search tree** is a data structure, a kind advanced one I would say, that optimizes the times for insertion, deletion and serching. Even though there a few types of SBBST (2-3 tree, AA tree, AVL tree, B tree, Red-black tree, ...), in this library I decided to implement the AVL Tree because I consider it as the easiest one.
It has *O(N)* space in memory and its respectives times and functions are:
=============== ===================== =====================
Time complexity Function in the class Action
=============== ===================== =====================
*O(1)* sbbst.getSize() Size of the tree
*O(1)* sbbst.getHeightTree() Height of the tree
*O(logN)* sbbst.search(x) Search value
*O(logN)* sbbst.insert(x) Insert value
*O(logN)* sbbst.delete(x) Delete value
*O(logN)* sbbst.getMinVal() Minimum value
*O(logN)* sbbst.getMaxVal() Maximum value
*O(K+logN)* sbbst.kthsmallest(k) Kth Minimum value
*O(K+logN)* sbbst.kthlargest(k) Kth Maximum value
*O(N)* str(sbbst) Visualize the tree
=============== ===================== =====================
I made the library **sbbst** with the intention that you can use it easily for your own projects, learning or coding competitions (in which case I would suggest to compile your program with Pypy instead of Python3 and download the code directly from my Github and modify it as your necessities). I used this structure (with a few changes so it can work out with intervals instead of numbers) in the Facebook Hacker Cup 2020 and it was fast enough to pass the time complexity, though I would suggest to migrate to C++ (thing that I have not done properly yet [sept 2020]).
Requirements
============
- Python 2.7+ or 3.4+
Installation
============
To install a stable version from PyPi_:
.. code-block:: bash
~$ pip install sbbst
Or download the *__init__.py* file directly from my GitHub_ and work with it.
.. _PyPi: https://pypi.python.org/pypi/sbbst
.. _GitHub: https://github.com/Ualabi/self_balancing_binary_search_tree
The library works with the tree nodes defined as:
.. code-block:: python
class TreeNode():
def __init__ (self, val):
self.val = val
self.place = 0 # helps in the print process
self.height = 1 # mandatory in the AVL Trees
self.left = None
self.right = None
Getting Started
===============
To start working with the library, you will only need 2 lines:
.. code-block:: python
>>> from sbbst import sbbst
>>> ST = sbbst()
And that will be enough to start working with it. Take the following script as an example
.. code-block:: python
from sbbst import sbbst
ST = sbbst()
nums = [128, 131, 4, 134, 135, 10, 1, 3, 140, 14, 142, 145, 146, 147, 149] # random numbers
for num in nums:
ST.insert(num)
# It also works out: ST = sbbst(nums)
print(ST)
print("Number of elements:",ST.getSize())
print("Height:",ST.getHeightTree())
print("Min val:",ST.getMinVal())
print("Max val:",ST.getMaxVal())
print("3rd smallest val:",ST.kthsmallest(3))
print("2nd largest val:",ST.kthlargest(2))
print("Pre Order:",ST.inOrder())
print("In Order:",ST.preOrder())
print("Post Order:",ST.postOrder())
ST.delete(128)
ST.delete(140)
print(ST)
ST.insert(55)
print(ST)
print("Number of elements:",ST.getSize())
This would be the output you will see in the terminal:
::
____128_________
/ \
_4 ___140___
/ \ / \
1 10 134 145___
\ \ / \ / \
3 14 131 135 142 147
/ \
146 149
Number of elements: 15
Height: 5
Min val: 1
Max val: 149
3rd smallest val: 4
2nd lasrgets val: 145
Pre Order: [1, 3, 4, 10, 14, 128, 131, 134, 135, 140, 142, 145, 146, 147, 149]
In Order: [128, 4, 1, 3, 10, 14, 140, 134, 131, 135, 145, 142, 147, 146, 149]
Post Order: [3, 1, 14, 10, 4, 131, 135, 134, 142, 146, 149, 147, 145, 140, 128]
________131______
/ \
_4__ ___142
/ \ / \
1 14 134 145
\ / \ \ \
3 10 21 135 149
\
50
__________131______
/ \
_4__ ___142
/ \ / \
1 14__ 134 145
\ / \ \ \
3 10 50 135 149
/ \
21 55
Number of elements: 14
Additionally, I added 3 extra functios (the 3 of them works in *O(N)* time) in case you want to use it along you practice coding in platforms such as LeetCode_ or Interviewbit_. (At the beginning I had troubles to visualize what was happening in the Trees and the DFSs, swaps or insertions, so thats why I worked on in this library as sketch and then improved as it is today.) In those pages the *input* of the trees will be like:
::
s = "1 2 3 -1 4 -1 5 -1 -1 6 -1 -1 -1"
s = "1,2,3,null,4,null,5,null,null,6,null,null,null"
s = [ 1, 2, 3, None, 4, None, 5, None, None, 6, None, None, None ]
.. _LeetCode: https://leetcode.com/
.. _Interviewbit: https://www.interviewbit.com/courses/programming/
Some functions you can use are the following:
.. code-block:: python
from sbbst import *
# Any of the following s works out
# s = "1 2 3 -1 4 -1 5 -1 -1 6 -1 -1 -1"
# s = "1 2 3 None 4 None 5 None None 6 None None None"
# s = "1,2,3,null,4,null,5,null,null,6,null,null,null"
s = [ 1, 2, 3, None, 4, None, 5, None, None, 6, None, None, None ]
head = getTree(s)
print(getStr(head))
print("The list of the Tree is:",getList(head))
The output in the terminal will be the following:
::
_1
/ \
2 3_
\ \
4 5
/
6
The list of the Tree is: [1, 2, None, 4, None, None, 3, None, 5, 6, None, None, None]
Contributing
============
The best way to learn is to copy the code and edit it with your own necessities. You can also find other useful data structures in my GitHub https://github.com/Ualabi/Useful_Data_Structures.
If you want to contribute to this library, please do a pull request in the GitHub_. Thanks!
.. _GitHub: https://github.com/Ualabi/self_balancing_binary_search_tree
Change Log
==========
- 0.1 (09/09/2020)
- First release
- 1.0 (19/10/2020)
- Fix the counter of nodes in delete funcion. Spotted by DustinWehr_ .
.. _DustinWehr: https://github.com/DustinWehr
|
/sbbst-1.0.tar.gz/sbbst-1.0/README.rst
| 0.66769 | 0.676887 |
README.rst
|
pypi
|
import numpy as np
import pandas as pd
from sbc import util
import matplotlib.pyplot as plt
class Result:
def __init__(self, p, v, model_use):
self.p = p
self.v = v
self.model_use = model_use
def getDemandInterval(self):
return self.p
def getCVSquared(self):
return self.v
def getCoefficients(self):
print(f'p (demand interval), CV squared (coefficient of variation): {[self.p, self.v]}')
return [self.p, self.v]
def getModelUse(self):
return self.model_use
def classify_series_helper(data, type ="SBC"):
# check if input data is an array/list, etc:
data = data[~np.isnan(data)]
nzd = np.where(data != 0)[0]
k = len(nzd)
z = data[nzd]
x = np.diff(nzd, prepend=-1)
p = np.mean(x)
v = (np.std(z, ddof=1) / np.mean(z)) ** 2
if type == 'SBC':
if p > 1.32 or v < 0.49:
model_use = 'SBA'
else:
model_use = 'Croston'
else:
print("Unsupported classification type")
return Result(p, v, model_use)
def sbc_class(data, type = 'SBC', plot_type = None):
"""
Using Syntetos, Boylan, Croston method of demand pattern categorization,
an input is then classified into different patterns (Erratic, Lumpy, Smooth, Intermittent)
and the selected forecasting method (Croston, SBA)
Args:
data (list, array, dataframe): list/array of 1 time-series data or a dataframe for multiple series
type (str): "SBC" (default) ## TODO: add other method of classification (PKa, KHa, KH, PK)
plot_type (str): "summary" or "bar" or None: return a bar chart of numbers of demand patterns
in each category or a summary plot with numbers of demand patterns
in each model (similar to R/tsintermittent)
"""
# check if input data is an array/list, etc:
p = []
v = []
model_use = []
out_df = pd.DataFrame()
if np.ndim(data) == 1 and not isinstance(data, pd.DataFrame):
if len(data) != 0:
target = np.array(data)
target = target.reshape(len(target), -1)
res = classify_series_helper(target, type)
p.append(res.getDemandInterval())
v.append(res.getCVSquared())
model_use.append(res.getModelUse())
out_df['target'] = np.nan
out_df['p'] = p
out_df['CV Squared'] = v
out_df['model'] = model_use
else:
raise ValueError('Please check if data is empty')
elif np.ndim(data) == 1 and isinstance(data, pd.DataFrame):
# assume the first column to be the target
if len(data) != 0:
target = np.array(data.iloc[:, 0])
res = classify_series_helper(target, type)
p.append(res.getDemandInterval())
v.append(res.getCVSquared())
model_use.append(res.getModelUse())
out_df['target'] = np.nan
out_df['p'] = p
out_df['CV Squared'] = v
out_df['model'] = model_use
else:
raise ValueError('Please check if data is empty')
elif np.ndim(data) >1 and isinstance(data, pd.DataFrame):
if len(data) != 0:
target = data.to_numpy().T
p = []
v = []
model_use = []
out_df = pd.DataFrame()
for i in range(target.shape[0]):
res = classify_series_helper(target[i], type)
p.append(res.getDemandInterval())
v.append(res.getCVSquared())
model_use.append(res.getModelUse())
out_df['target'] = data.columns.to_list()
out_df['p'] = p
out_df['CV Squared'] = v
out_df['model'] = model_use
else:
raise ValueError('Please check if data is empty')
else:
raise ValueError('Please pass in a list, an array or a dataframe')
if plot_type is not None:
if plot_type == 'bar':
d = util.create_dict_plot_helper(out_df)
out_plot = util.bar_plot(d)
plt.show()
elif plot_type == 'summary':
out_plot = util.summary_plot(out_df)
plt.show()
else:
raise ValueError('Please pass in a correct type of plot')
return out_df
|
/sbc_classification-0.0.4-py3-none-any.whl/sbc/sbc_class.py
| 0.468547 | 0.423756 |
sbc_class.py
|
pypi
|
import logging
import uuid
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, PermissionsMixin
from django.db import models
from django.db.models import Q
from django.db.models import QuerySet
from django.utils import timezone
from django.utils.translation import ugettext as _
from faker import Faker
from sbc_drf import errors
from sbc_drf.mixins import AddUpdateTimeModelMixin, ViewPermModelMetaMixin
from sbc_drf_user import signals
from sbc_drf_user.errors import ErrMsg
L = logging.getLogger(__name__)
class UserManager(BaseUserManager):
def _create_user(self, email, password, **extra_fields):
"""
Creates and saves a user with the given email and password.
"""
assert email, 'Users must have an email address'
email = self.normalize_email(email)
last_name = extra_fields.pop('last_name', Faker().last_name())
user = self.model(email=email, last_name=last_name, **extra_fields)
user.set_password(password)
user.save()
signals.user_registered.send(instance=user, sender=self.model)
return user
def create(self, email, password=None, **extra_fields):
"""
Registers the user
:param str email: email
:param str password: Password
:param extra_fields:
:return user:
:signal user_registered: user instance
"""
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.update(is_superuser=True)
return self._create_user(email, password, **extra_fields)
def generate_password_reset_key(self, email):
"""
Generates password reset key using UUID
:param str email: Email address
:signal reset_password_request:
"""
try:
user = self.get(email=self.normalize_email(email))
except User.DoesNotExist:
raise errors.NotFound(*ErrMsg.EMAIL_NOT_EXISTS_1003)
if user.is_password_reset is False:
signals.reset_password_request.send(instance=user, retry=True, sender=self.model)
return
user.is_password_reset = False
user.password_reset_requested_at = timezone.now()
user.password_reset_key = uuid.uuid4()
user.save(update_fields=['is_password_reset', 'password_reset_requested_at',
'password_reset_key'])
signals.reset_password_request.send(instance=user, retry=False, sender=self.model)
def reset_password(self, email, reset_key, new_password):
"""
Reset password using password reset key
:param str email: user email address
:param UUID reset_key: Password reset token
:param str new_password: New password to be set
:signal reset_password_request: Upon successful password change
"""
user = self.get(email=email)
assert str(user.password_reset_key) == reset_key, ErrMsg.EMAIL_PW_KEY_MISMATCH_1004[0]
assert user.is_password_reset is False, ErrMsg.PW_RESET_KEY_USED_1005[0]
user.set_password(new_password)
# changing the key so user cannot use same key again
user.password_reset_key = uuid.uuid4()
user.password_reset_at = timezone.now()
user.is_password_reset = True
user.save(update_fields=['password', 'password_reset_key', 'password_reset_at',
'is_password_reset'])
signals.reset_password_done.send(instance=user, sender=self.model)
def first_superuser(self):
return self.filter(is_superuser=True).order_by('id').first()
@classmethod
def normalize_email(cls, email):
return super().normalize_email(email).lower()
class UserQuerySet(QuerySet):
def staff_users(self):
return self.filter(Q(is_staff=True) | Q(is_superuser=True))
def superusers(self):
return self.filter(is_superuser=True)
class AbstractUser(AddUpdateTimeModelMixin, AbstractBaseUser, PermissionsMixin):
"""
Class to store user profile
"""
#: First name
first_name = models.CharField(_('First Name'), max_length=50, default='Nameless', blank=True,
db_index=True)
#: Last name
last_name = models.CharField(_('Last Name'), max_length=50, default='', blank=True,
db_index=True)
#: Email address
email = models.EmailField(_('Email Address'), unique=True)
#: Indicates if user active and allowed to login
is_active = models.BooleanField(_('active'), default=True)
#: Indicates if the user is member of staff (who is supposed to manage various modules)
is_staff = models.BooleanField(_('Staff'), default=False)
#: Should be used to when user sign-up is publicly available and email verification is required.
email_verification_key = models.UUIDField(blank=True, default=uuid.uuid4, editable=False)
#: Password rest token when user forgot their password.
password_reset_key = models.UUIDField(blank=True, default=uuid.uuid4, editable=False)
#: Stores boolean if the password is reset using the key. The idea of keeping this variable
#: is to check if the new password reset key should be generated each time user requests or it
#: should be generated only if old key is not used. I thought it this way because sometimes
#: email gets delayed and user keep trying and then all email contains different key that
#: makes user confused which key should be used to reset password.
is_password_reset = models.BooleanField(blank=True, default=True)
#: Date time that holds the last time stamp of password reset done successfully
password_reset_requested_at = models.DateTimeField(default=None, null=True)
#: Date time that holds the last time stamp of password reset done successfully
password_reset_at = models.DateTimeField(default=None, null=True)
#: Removing unwanted fields
last_login = None
objects = UserManager.from_queryset(UserQuerySet)()
USERNAME_FIELD = 'email'
class Meta(AbstractBaseUser.Meta, ViewPermModelMetaMixin):
swappable = 'AUTH_USER_MODEL'
db_table = 'user'
abstract = True
@property
def full_name(self):
return "{0} {1}".format(self.first_name, self.last_name)
def get_full_name(self):
return self.full_name
@property
def short_name(self):
return self.first_name
def change_password(self, new_password, **kw):
"""
Helper method to user change password
:param str new_password: New password
:send signal: password_changed
"""
L.debug('Changing password')
self.set_password(new_password)
self.save()
signals.password_changed.send(instance=self, sender=self.__class__)
# On Python 3: def __str__(self):
def __str__(self):
return self.email
class User(AbstractUser):
pass
|
/sbc_drf_user-1.0.3.tar.gz/sbc_drf_user-1.0.3/src/sbc_drf_user/models.py
| 0.547706 | 0.151247 |
models.py
|
pypi
|
import logging
from rest_framework import permissions as rf_permissions
L = logging.getLogger(__name__)
class DjangoModelPermissions(rf_permissions.DjangoModelPermissions):
"""
Extends to add view permission
"""
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
class IsOwnerPermissions(rf_permissions.BasePermission):
"""
Permission class that grant permission according to object owner.
:param bool ownership_fields: List of attribute of model property that specify the ownership \
of object, If it's an empty object, ``True`` will always return.
.. note:: Condition check on ``ownership_fields`` will be done in ORing fashion
"""
def has_object_permission(self, request, view, obj):
"""
Checks for the single object if its user is same as obj. If you want to skip the owner
permission for LISTing the data, you can set `skip_owner_filter=True` in viewset class
"""
ownership_fields = getattr(view, 'ownership_fields', None)
# If it's explicitly mentioned to empty
if not ownership_fields or request.method == 'GET':
return True
_u = request.user
# Requesting user is accessing to himself?
if _u == obj:
return True
result = any(_u == self._get(obj, field) or _u.id == self._get(obj, field)
for field in ownership_fields)
if result is False:
logging.warning('Permission denied', extra={'user': _u,
'ownership_fields': ownership_fields,
'object': obj})
return result
@staticmethod
def _get(obj, path):
paths = path.split('__')
for p in paths:
obj = getattr(obj, p, None)
if obj is None:
return
return obj
class ActionPermissions(rf_permissions.BasePermission):
"""
Permission class that checks for custom permissions defined under model's meta option.
Model permission name should match with the custom action defined in viewset class.
for eg, if ``action_pay()`` defined under viewset, model permission set should contain custom
permission name `action_pay`.
``has_permission()`` will match for ``<app_label>.<action_name>`` as permission name if user
has.
.. note:: Be careful using this class as it doesn't check anything apart from custom \
permissions.
"""
def has_permission(self, request, view):
model_cls = getattr(view, 'model', None)
queryset = getattr(view, 'queryset', None)
if model_cls is None and queryset is not None:
model_cls = queryset.model
expected_perm = "%(app_label)s.%(action_name)s" % {'app_label': model_cls._meta.app_label,
'action_name': view.action}
return request.user.has_perm(expected_perm)
|
/sbc_drf-1.1.6.tar.gz/sbc_drf-1.1.6/src/sbc_drf/permissions.py
| 0.77535 | 0.170439 |
permissions.py
|
pypi
|
__all__ = ['DynamicFieldsSerializerMixin', 'ExcludeOnUpdateSerializerMixin',
'InjectReqUserSerializerMixin']
from django.http import QueryDict
class DynamicFieldsSerializerMixin:
"""
Returns only attributes passed in `query_params['fields']`
Eg:
/myendpoint?fields=name,email,id
"""
@property
def _readable_fields(self):
all_fields = super()._readable_fields
query_fields = self.query_fields
for field in all_fields:
field_name = getattr(field, 'name', None) or getattr(field, 'field_name', None)
if not query_fields:
yield field
elif query_fields and field_name in query_fields:
yield field
@property
def query_fields(self):
try:
fields = self.context['request'].query_params['fields']
except (KeyError, AttributeError):
return
return fields.split(',')
class ExcludeOnUpdateSerializerMixin:
"""
Excludes defined fields in `Meta.exclude_on_update` when making update request
"""
def update(self, instance, validated_data):
# Exclude those fields defined under Meta.exclude_on_update attribute
exclude_on_update = getattr(self.Meta, 'exclude_on_update', [])
for field in exclude_on_update:
validated_data.pop(field, None)
return super().update(instance, validated_data)
class NewSerializerMixin:
"""
A serializer mixin to create a serializer with new meta attributes.
Eg:
```
class MySerializer(NewSerializerMixin, Serializer):
class Meta:
fields = '__all__'
# When it's required that we need existing serializer with different fields
MyRefSerializer = MySerializer.New(fields=('abc', 'xyz'))
```
"""
@classmethod
def New(cls, **meta_kwargs):
class NewCls(cls):
class Meta(cls.Meta):
pass
for k, v in meta_kwargs.items():
setattr(NewCls.Meta, k, v)
return NewCls
class InjectReqUserSerializerMixin:
"""
Automatically sets a model's field to request.user
Usage:
class MySerializer(AutoSetSerializerMixin, ModelSerializer):
class Meta:
...
model_user_field = 'user'
"""
def to_internal_value(self, data):
model_user_field = getattr(self.Meta, 'model_user_field')
if model_user_field:
if isinstance(data, QueryDict):
data._mutable = True
data[model_user_field] = getattr(self.context['request'].user, 'id')
if isinstance(data, QueryDict):
data._mutable = False
return super().to_internal_value(data)
|
/sbc_drf-1.1.6.tar.gz/sbc_drf-1.1.6/src/sbc_drf/mixins/serializer.py
| 0.830147 | 0.27269 |
serializer.py
|
pypi
|
import logging
class OverlayParam:
''' Represent a parameter in an overlay '''
def __init__(self, name:str, param_type:str, number_as_str=False, param_help='No additional help available', values=None, default_value=None, set_value=None):
self.name = name
if param_type in ['str', 'string']:
self.param_type = str
elif param_type in ['int', 'integer']:
self.param_type = int
elif param_type in ['bool', 'boolean']:
self.param_type = bool
elif param_type in ['float', 'decimal']:
self.param_type = float
else:
self.param_type = None
self.number_as_str = number_as_str
self.param_help = param_help
self.values = values if values is not None else []
self.default_value = default_value
self.set_value = default_value if set_value is None else set_value
def __str__(self):
''' Print the parameter help as a line '''
return f"({'required' if self.default_value is None else 'optional'}) " + \
str(self.values) if self.values else '' + \
f"(DEFAULT: {self.default_value})" if self.default_value else '' + \
self.param_help
def update_value(self, value):
''' Update the set value. Verify type and if in values list '''
if self.param_type is not None:
if self.number_as_str and isinstance(value, int) or isinstance(value, float):
value = str(value)
if not isinstance(value, self.param_type):
raise ValueError(f"Unable to set value for {self.name} to {value}. Does not match type {self.param_type}")
if len(self.values) != 0:
if value not in self.values:
raise ValueError(f"Unable to set value for {self.name} to {value}. Supported values: {self.values}")
self.set_value = value
@property
def ok(self):
''' Return True if parameter value is set '''
return self.set_value is not None
class DynamicOverlay:
''' Represent an overlay '''
def __init__(self, name:str, template='gpio_basic.dts.j2', params=None):
''' Create overlay instance from the configuration file '''
self.name = name
self.template = template
self.params = []
if isinstance(params, list):
for _param in params:
try:
self.params.append(OverlayParam(**_param))
except Exception as ex:
logging.error('Unable to import parameter for %s: %s', self.name, _param)
raise ex
else:
raise ValueError(f"Expecting a list of parameters and received {type(params)}")
@property
def ok(self) -> bool:
''' Return True if all required parameters are configured, else False '''
return len(self.missing_params) == 0
@property
def missing_params(self) -> list:
''' Return a list of required parameters that are missing '''
missing_params = []
for _param in self.params:
if not _param.ok:
missing_params.append(_param.name)
return missing_params
@property
def param_name_list(self) -> list:
''' Return a list of all available parameters '''
return [_overlay_param.name for _overlay_param in self.params]
def get_param(self, param_name:str) -> OverlayParam:
''' Return a specific parameter '''
for _param in self.params:
if _param.name == param_name:
return _param
raise ValueError(f'Unknown parameter {param_name}. Parameters are {self.param_name_list}')
def set_params(self, params:dict):
''' Update the params for the overlay. Verify all params set '''
for item, value in params.items():
if item not in self.param_name_list:
logging.error('Parameter %s not a valid parameter', item)
raise ValueError(f"Paramter {item} not a valid parameter")
self.get_param(item).update_value(value)
for _param in self.params:
if not _param.ok:
raise ValueError(f"Not all required parameters configured. {_param} is required")
|
/sbc_gpio-1.0.3-py3-none-any.whl/sbc_gpio/dynamic_dts.py
| 0.665084 | 0.262877 |
dynamic_dts.py
|
pypi
|
import collections
import datetime
import logging
import ntpath
import re
import pytz
L = logging.getLogger('app.' + __name__)
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def str2bool(v):
"""
Converts string to bool. True for any term from "yes", "true", "t", "1"
:param str v: Term
:return bool:
"""
try:
return v.lower() in ("yes", "true", "t", "1")
except:
return False
def prop2pair(cls, out='tuple', startswith_only=None):
"""
Iterates over each property of the cls and prepare the key-value pair
:param cls: Class to be interated over
:param str out: Output format either `tuple` or `dict`
:param str startswith_only: Consider only properties that starts with this value
:return tuple,dict:
"""
if startswith_only is not None:
d = {
getattr(cls, prop)[0]:
getattr(cls, prop)[1] if getattr(cls, prop)[1] else getattr(cls, prop)[0]
for prop in dir(cls) if prop.startswith(startswith_only) is True
}
else:
d = {
getattr(cls, prop)[0]:
getattr(cls, prop)[1] if getattr(cls, prop)[1] else getattr(cls, prop)[0]
for prop in dir(cls) if prop.startswith('_') is False
}
if out == 'tuple':
d = list(d.items())
# Keep tuple consistent so when migration runs it won't detect its changes
d.sort(key=lambda x: x[0])
elif out == 'list':
return sorted(list(d.keys()))
return d
def round_off(value, digits=2):
"""
Rounding off the value
:param float value: Value to be rounded
:param digits: Digit to kept as after point
:return float: Rounded value
"""
return float(("{0:.%sf}" % digits).format(value))
def camel_to_snake_case(name):
"""
Converts given camel cased string to snake case
For eg:
- CamelCamelCase -> camel_camel_case
- Camel2Camel2Case -> camel2_camel2_case
- get2HTTPResponseCode -> get2_http_response_code
:param str name: String to be converted
:return str: Converted string
"""
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower().replace(' ', '_')
def snake_case_to_title(s):
"""
Converts snake case string to title case
:param str s: String to be converted
:return str: Converted string
"""
return s.replace('_', ' ').title().replace(' ', '')
def strip_dict(d):
return {k: v.strip() if isinstance(v, str) else v for k, v in d.items()}
def utc_to_local_time(t, to_tz, fmt='%H:%M'):
utc_tz = pytz.timezone('UTC')
local_tz = pytz.timezone(to_tz)
dt = datetime.datetime.combine(datetime.date.today(), t)
local_dt = utc_tz.localize(dt).astimezone(local_tz)
if fmt is None:
return local_dt.time()
return local_dt.strftime(fmt)
def path_leaf(path):
"""
Extracts file name from given path
:param str path: Path be extracted the file name from
:return str: File name
"""
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def flatten_dict(d, parent_key='', sep='.'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
class Constant:
pass
class ConstMember(str):
def __new__(cls, value, *args, **kwargs):
# explicitly only pass value to the str constructor
return str.__new__(cls, value)
def __init__(self, value, help_text=''):
self.value = value
self.text = help_text
def __str__(self):
return self.value
def __iter__(self):
yield self.value
yield self.text
def __getitem__(self, item):
if item == 0:
return self.value
elif item == 1:
return self.text
else:
raise IndexError()
|
/sbc_utils-1.0.2-py3-none-any.whl/sbc_utils/__init__.py
| 0.63375 | 0.275142 |
__init__.py
|
pypi
|
import csv
import io
from typing import List
class Output:
TEXT = 'text'
CSV = 'csv'
TABLE = 'table'
CHOICES = ['text', 'csv', 'table']
def __init__(self, *headers: List[str]):
if not isinstance(headers, tuple) or not headers:
raise ValueError('expected list of strings for headers')
self._headers = headers
self._data = []
self._col_width = [len(header) for header in headers]
def add(self, *row: List[str]):
if not isinstance(row, tuple) or len(row) != len(self._headers):
raise ValueError(
f'expected list of ({len(self._headers)} strings for row. Got {row}')
self._data.append(row)
for index, cell in enumerate(row):
self._col_width[index] = max(
self._col_width[index], len(str(cell)))
def export(self, type: int) -> str:
if type == self.TEXT:
return self._export_text()
elif type == self.CSV:
return self._export_csv()
elif type == self.TABLE:
return self._export_table()
def _get_padded_lines(self) -> list:
lines = []
for row in self._data:
line = [pad(cell, self._col_width[index])
for index, cell in enumerate(row)]
lines.append(line)
return lines
def _export_text(self) -> str:
lines = self._get_padded_lines()
return '\n'.join(['|'.join(line) for line in lines])
def _export_csv(self) -> str:
with io.StringIO(newline='') as f:
writer = csv.writer(f)
writer.writerow(self._headers)
writer.writerows(self._data)
f.seek(0)
return f.read()
def _export_table(self) -> str:
with io.StringIO(newline='') as f:
f.writelines([
'+'+'+'.join(['-'*n for n in self._col_width])+'+\n',
'|'+'|'.join([pad(h, self._col_width[n])
for n, h in enumerate(self._headers)])+'|\n',
'+'+'+'.join(['-'*n for n in self._col_width])+'+\n',
])
for row in self._data:
f.write(
'|'+'|'.join(pad(cell, self._col_width[n])
for n, cell in enumerate(row))+'|\n')
f.writelines([
'+'+'+'.join(['-'*n for n in self._col_width])+'+\n'])
f.seek(0)
return f.read()
def pad(x, n) -> str:
if isinstance(x, str):
return x.ljust(n)
return str(x).rjust(n)
|
/sbcli_furlan-0.0.7-py3-none-any.whl/src/tools/output.py
| 0.708717 | 0.186891 |
output.py
|
pypi
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.