code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Utility for download text from web.
"""
import typing
import logging
import urllib.parse
import urllib.request
import urllib.error
import http.client
from .downloader import Downloader, DownloadStringResult, DownloadError
from .cachers import BaseCacher
# NOTE(review): this attaches a NullHandler to the ROOT logger; the usual
# library convention is logging.getLogger(__name__).addHandler(logging.NullHandler())
# so only this package's records are silenced by default — confirm intent.
logging.getLogger().addHandler(logging.NullHandler())
class UrlDownloadStringResult(DownloadStringResult):
    """ Result of a string download from the web.

    Holds the downloaded string and provides a feedback channel about the
    string's quality (actuality, correctness, fullness, etc.) back to the
    downloader, e.g. for cache and throttle management.
    """

    def __init__(
            self,
            downloaded_string: str,
            cacher: BaseCacher,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]):
        """ Initialize instance.

        :param downloaded_string: Downloaded string.
        :param cacher: Cacher used for storing downloaded strings.
        :param url: URL of request.
        :param parameters: Parameters of request.
        :param headers: Headers of request.
        """
        super().__init__(downloaded_string)

        # Remember the request identity so the cached entry can be dropped later.
        self.cacher = cacher
        self.url = url
        self.parameters = parameters
        self.headers = headers

    def set_correctness(self, is_correct: bool):
        super().set_correctness(is_correct)

        # An incorrect result must not be served from the cache again.
        if self.is_correct:
            return
        self.cacher.drop(self.url, self.parameters, self.headers)
class UrlDownloader(Downloader):
    """ Cacheable string (text) downloader from web using ``urllib``.
    """
    default_timeout_in_seconds: float = 600.0
    default_encoding: str = 'utf-8'

    def __init__(
            self,
            cacher: BaseCacher,
            timeout_in_seconds: float = default_timeout_in_seconds,
            encoding: str = default_encoding):
        """ Initialize downloader.

        :param cacher: Cacher used for storing downloaded strings.
        :param timeout_in_seconds: Timeout value for download request.
        :param encoding: Encoding of downloaded strings.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.cacher = cacher
        self.timeout_in_seconds = timeout_in_seconds
        self.encoding = encoding

        self._parameters: typing.List[typing.Tuple[str, str]] = []  # [(param_code, param_value)]
        self._headers: typing.Dict[str, str] = {}  # {header_code: header_value}

    @property
    def parameters(self) -> typing.List[typing.Tuple[str, str]]:
        """ Query-string parameters used for subsequent requests. """
        return self._parameters

    @parameters.setter
    def parameters(self, value: typing.List[typing.Tuple[str, str]]) -> None:
        self._parameters = value

    @property
    def headers(self) -> typing.Dict[str, str]:
        """ HTTP headers used for subsequent requests. """
        return self._headers

    @headers.setter
    def headers(self, value: typing.Dict[str, str]) -> None:
        self._headers = value

    def read_string_from(self, request: urllib.request.Request, encoding: str) -> str:
        """ Read data from request as string.

        Can be mocked in tests.

        :param request: ``urllib.request.Request`` instance.
        :param encoding: Encoding of downloaded string.
        :return: Downloaded string.
        :raises DownloadError: On any network or HTTP protocol failure.
        """
        try:
            with urllib.request.urlopen(request, timeout=self.timeout_in_seconds) as response:
                # FIX: annotation was ``bytearray``; HTTPResponse.read() returns ``bytes``.
                data: bytes = response.read()
        except (urllib.error.URLError, http.client.HTTPException) as ex:
            raise DownloadError() from ex

        # FIX: was ``len(data or '')`` which mixed bytes and str;
        # read() never returns None, so plain len() is correct.
        self.logger.debug(f"Downloaded {len(data)} bytes")
        return data.decode(encoding)

    def build_query(self, url: str) -> urllib.request.Request:
        """ Build request object based on `parameters` and `headers` attributes.

        :param url: URL of request.
        :return: ``urllib.request.Request`` instance.
        """
        data = urllib.parse.urlencode(self.parameters)
        if data:
            url += '?' + data
        req = urllib.request.Request(url, headers=self.headers, method='GET')
        return req

    def download_string_impl(self, request: urllib.request.Request, encoding: str) -> str:
        """ Calls when real (not cached) download needed.

        :param request: ``urllib.request.Request`` instance.
        :param encoding: Encoding of downloaded string.
        :return: Downloaded string.
        """
        self.logger.info(f"Download from {request.full_url!r} to string")
        return self.read_string_from(request, encoding)

    def download_string(self, url: str, encoding: typing.Optional[str] = None) -> UrlDownloadStringResult:
        """ Download string from `url`, consulting the cache first.

        :param url: URL to download from.
        :param encoding: Encoding of downloaded string; instance default when ``None``.
        :return: ``UrlDownloadStringResult`` instance.
        """
        request = self.build_query(url)
        encoding = self.encoding if encoding is None else encoding

        # Snapshot mutable request identity: `parameters`/`headers` may be changed
        # by the caller before the result's feedback (set_correctness) arrives.
        parameters = self.parameters.copy()
        headers = self.headers.copy()

        got_from_cache, result = self.cacher.retrieve(
            url,
            parameters,
            headers,
            lambda _request=request, _encoding=encoding: self.download_string_impl(_request, _encoding))

        if got_from_cache:
            self.logger.info(f"Got string for {request.full_url!r} from cache")

        return UrlDownloadStringResult(result, self.cacher, url, parameters, headers)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/communication/url_downloader.py
| 0.621196 | 0.157687 |
url_downloader.py
|
pypi
|
import abc
import logging
import datetime
import typing
# NOTE(review): this attaches a NullHandler to the ROOT logger; the usual
# library convention is logging.getLogger(__name__).addHandler(logging.NullHandler())
# so only this package's records are silenced by default — confirm intent.
logging.getLogger().addHandler(logging.NullHandler())
class ExpiryCalculator:
    """ Uses for calculations of expiry and revive moments.

    All moments are timezone-aware and based on UTC.
    """

    def is_expired(self, expiry_moment: datetime.datetime) -> bool:
        """ Verify `expiry_moment` for expiration.

        :param expiry_moment: Moment to verify.
        :return: ``True`` if 'expiry_moment' was in past (i.e. expired).
        """
        now = datetime.datetime.now(tz=datetime.timezone.utc)
        return now > expiry_moment

    def get_expiry_moment(self, delta: datetime.timedelta, start_from: datetime.datetime = None) -> datetime.datetime:
        """ Calculate and return moment shifted on `delta` time from `start_from` moment.

        If `start_from` is ``None`` then moment shifts from current moment (from now).

        :param delta: Timedelta for calculation.
        :param start_from: Start moment for calculation.
        :return: Calculated expiry moment relatively to `start_from` on `delta` time.
        """
        base_moment = start_from
        if base_moment is None:
            base_moment = datetime.datetime.now(tz=datetime.timezone.utc)
        return base_moment + delta

    def get_revive_moment(self) -> datetime.datetime:
        """ Usually return current moment (now).

        :return: Moment of this method invoking.
        """
        return datetime.datetime.now(tz=datetime.timezone.utc)
class BaseCacher(abc.ABC):
    """ Base class (interface) for all cachers.

    A cacher is keyed by the triple (url, parameters, headers).
    """

    @abc.abstractmethod
    def retrieve(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str],
            reviver: typing.Callable[[], str]) -> typing.Tuple[bool, str]:
        """ Look up a previously downloaded string by the request identity.

        When not found, call `reviver` and store its result.

        :param url: URL of request.
        :param parameters: Parameters of request.
        :param headers: Headers of request.
        :param reviver: Function that returns downloaded string if such not found in cache yet.
        :return: Pair: (got_from_cache, result)
        """
        raise NotImplementedError

    @abc.abstractmethod
    def drop(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]) -> bool:
        """ Remove the item matching the request identity from internal storage.

        :param url: URL of request.
        :param parameters: Parameters of request.
        :param headers: Headers of request.
        :return: ``True`` if any item was dropped. ``False`` if no items was in storage by that parameters.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def has(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]) -> bool:
        """ Tell whether the cache holds an item for the request identity.

        :param url: URL of request.
        :param parameters: Parameters of request.
        :param headers: Headers of request.
        :return: ``True`` if there are any item is in storage by that parameters.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def is_empty(self) -> bool:
        """ Tell whether the cache holds no items at all.

        :return: ``True`` if cache is empty
        """
        raise NotImplementedError

    @abc.abstractmethod
    def full_clear(self):
        """ Remove every item from internal storage.
        """
        raise NotImplementedError
class ExpirableCacher(BaseCacher, abc.ABC):
    """ Base class for cachers whose stored items can expire.

    Adds an `expiry` timedelta setting and a `clean` maintenance operation.
    """

    @property
    @abc.abstractmethod
    def expiry(self) -> datetime.timedelta:
        """ Current timedelta value of expiry.
        """
        raise NotImplementedError

    @expiry.setter
    @abc.abstractmethod
    def expiry(self, delta: datetime.timedelta):
        """ Set new value of timedelta for expiry.

        Usually calls `clean` for revisiting stored items due to new expiry value.

        :param delta: New value.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def clean(self):
        """ Revisit all cached items, drop expired, update expiry moment if needed.
        """
        raise NotImplementedError
class DummyCacher(BaseCacher):
    """ Cacher without cache.

    Never stores anything: every ``retrieve`` invokes the reviver anew.
    """

    def retrieve(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str],
            reviver: typing.Callable[[], str]) -> typing.Tuple[bool, str]:
        # Nothing is ever cached, so always download afresh.
        fresh_result = reviver()
        return False, fresh_result

    def drop(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]) -> bool:
        # Nothing stored, nothing to drop.
        return False

    def has(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]) -> bool:
        # Storage is always empty.
        return False

    def is_empty(self) -> bool:
        return True

    def full_clear(self):
        # No internal storage to clear.
        pass
class _InMemoryCacheKey(typing.NamedTuple):
    # Immutable, hashable identity of a cached request (dict key in InMemoryCacher).
    url: str
    # NOTE: annotations fixed to variadic tuples — _build_key produces
    # tuples of arbitrary length, not one-element tuples.
    parameters: typing.Tuple[typing.Tuple[str, str], ...]  # ((param_name, param_value), ...)
    headers: typing.Tuple[typing.Tuple[str, str], ...]  # ((header_name, header_value), ...)
class _InMemoryCacheItem(typing.NamedTuple):
    # Single cache entry stored in InMemoryCacher._storage.
    key: _InMemoryCacheKey  # key this item is stored under (used for reverse lookup in clean())
    result: str  # cached downloaded string
    revive_moment: datetime.datetime  # when the string was (re)downloaded
    expiry_moment: datetime.datetime  # when the entry becomes stale
class InMemoryCacher(ExpirableCacher):
    """ In-memory cacher based on dictionary.
    """
    default_expiry: datetime.timedelta = datetime.timedelta()  # zero delta

    _expiry: datetime.timedelta
    _storage: typing.Dict[_InMemoryCacheKey, _InMemoryCacheItem]

    def __init__(self,
                 initial_expiry: datetime.timedelta = None,
                 expiry_calculator: ExpiryCalculator = None):
        """ Initialize cacher.

        :param initial_expiry: Initial value of expiry.
        :param expiry_calculator: Expiry calculator used for such calculations.
        """
        self.expiry_calculator = ExpiryCalculator() if expiry_calculator is None else expiry_calculator
        self._expiry = self.default_expiry if initial_expiry is None else initial_expiry
        self._storage = {}

    @property
    def expiry(self) -> datetime.timedelta:
        return self._expiry

    @expiry.setter
    def expiry(self, delta: datetime.timedelta):
        self._expiry = delta
        # Re-evaluate all stored items against the new expiry value.
        self.clean()

    @staticmethod
    def _build_key(
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]) -> _InMemoryCacheKey:
        # Normalize everything to strings and tuples so the key is hashable.
        normalized_parameters = tuple((str(name), str(value)) for name, value in parameters)
        normalized_headers = tuple((str(name), str(value)) for name, value in headers.items())
        return _InMemoryCacheKey(
            url=str(url),
            parameters=normalized_parameters,
            headers=normalized_headers)

    def clean(self):
        # Iterate over a snapshot: the loop mutates the storage.
        for item in list(self._storage.values()):
            refreshed_expiry = self.expiry_calculator.get_expiry_moment(
                self.expiry,
                start_from=item.revive_moment)
            item_expired = (self.expiry_calculator.is_expired(item.expiry_moment)
                            or self.expiry_calculator.is_expired(refreshed_expiry))
            if item_expired:
                del self._storage[item.key]
            else:
                # Keep the item, but recalculate its expiry against the current setting.
                self._storage[item.key] = item._replace(expiry_moment=refreshed_expiry)

    def retrieve(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str],
            reviver: typing.Callable[[], str]) -> typing.Tuple[bool, str]:
        self.clean()

        key = self._build_key(url, parameters, headers)
        cache_item = self._storage.get(key)
        got_from_cache = True

        if cache_item is None or self.expiry_calculator.is_expired(cache_item.expiry_moment):
            # Miss (or stale hit): download afresh and (re)store.
            got_from_cache = False
            fresh_result = reviver()
            cache_item = _InMemoryCacheItem(
                key=key,
                result=fresh_result,
                revive_moment=self.expiry_calculator.get_revive_moment(),
                expiry_moment=self.expiry_calculator.get_expiry_moment(self.expiry))
            self._storage[key] = cache_item

        return got_from_cache, cache_item.result

    def drop(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]) -> bool:
        self.clean()

        key = self._build_key(url, parameters, headers)
        return self._storage.pop(key, None) is not None

    def has(
            self,
            url: str,
            parameters: typing.List[typing.Tuple[str, str]],
            headers: typing.Dict[str, str]) -> bool:
        self.clean()

        key = self._build_key(url, parameters, headers)
        return key in self._storage

    def is_empty(self) -> bool:
        self.clean()

        return not self._storage

    def full_clear(self):
        self._storage.clear()
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/communication/cachers.py
| 0.863406 | 0.537466 |
cachers.py
|
pypi
|
import datetime
import inspect
import typing
import logging
from ..communication.downloader import DownloadError
from .base import (
InstrumentStringDataDownloader, SourceDownloadError,
InstrumentValuesHistoryParser, InstrumentInfoParser, InstrumentHistoryValuesExporter,
InstrumentInfoProvider, InstrumentValueProvider,
InstrumentExporterRegistry, InstrumentExporterFactory, InstrumentsInfoExporter,
InstrumentHistoryDownloadParameters, InstrumentValuesHistoryEmpty, MaxPagesLimitExceeded, InstrumentInfoEmpty)
# NOTE(review): this attaches a NullHandler to the ROOT logger; the usual
# library convention is logging.getLogger(__name__).addHandler(logging.NullHandler())
# so only this package's records are silenced by default — confirm intent.
logging.getLogger().addHandler(logging.NullHandler())
class GenericInstrumentHistoryValuesExporter(InstrumentHistoryValuesExporter):
    """ Generic, used by default, instrument history exporter.
    """
    max_paged_parameters = 10000  # limit of paged parameters

    def __init__(
            self,
            string_data_downloader: InstrumentStringDataDownloader,
            history_values_parser: InstrumentValuesHistoryParser):
        """ Initialize exporter.

        :param string_data_downloader: Used instrument string data downloader.
        :param history_values_parser: Used instrument values history parser.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.string_data_downloader = string_data_downloader
        self.history_values_parser = history_values_parser

    def export_instrument_history_values(
            self,
            parameters: InstrumentHistoryDownloadParameters,
            moment_from: datetime.datetime,
            moment_to: datetime.datetime) -> typing.Iterable[InstrumentValueProvider]:
        """ Download, parse and yield instrument values within the interval.

        :param parameters: Source-specific history download parameters.
        :param moment_from: Begin of interval (inclusive).
        :param moment_to: End of interval (inclusive).
        :return: Generator of instrument value providers.
        :raises ValueError: If `moment_from` is greater than `moment_to`.
        :raises MaxPagesLimitExceeded: If pagination exceeds `max_paged_parameters`.
        :raises SourceDownloadError: If download of some page failed.
        """
        if moment_from > moment_to:
            # FIX: error message typo "greater then" -> "greater than".
            raise ValueError(f"Moment from ({moment_from}) is greater than moment to ({moment_to})")

        # FIX: original f-string embedded a tuple expression
        # `{moment_from.isoformat(), moment_to.isoformat()}` and logged a tuple repr.
        self.logger.info(f"Begin to export instrument history values "
                         f"in [{moment_from.isoformat()}, {moment_to.isoformat()}] "
                         f"by parameters: {parameters}")

        parameters, moment_from, moment_to = self.string_data_downloader.adjust_download_instrument_history_parameters(
            parameters=parameters,
            moment_from=moment_from,
            moment_to=moment_to)
        self.logger.info(f"Parameters was adjusted to: {parameters}")
        self.logger.info(f"Interval was adjusted to: {moment_from.isoformat()}..{moment_to.isoformat()}")

        paged_parameters_index = 0
        for paged_parameters, paged_moment_from, paged_moment_to in \
                self.string_data_downloader.paginate_download_instrument_history_parameters(
                    parameters=parameters,
                    moment_from=moment_from,
                    moment_to=moment_to):
            self.logger.info(f"Begin to export instrument history values "
                             f"by paged parameters: {paged_parameters}, "
                             f"paged interval: {paged_moment_from}..{paged_moment_to}")

            paged_parameters_index += 1
            # FIX: was `>=`, which raised on page number `max_paged_parameters`
            # itself and thus effectively allowed only (max_paged_parameters - 1) pages.
            if paged_parameters_index > self.max_paged_parameters:
                raise MaxPagesLimitExceeded(self.max_paged_parameters)

            try:
                history_data_string_result = \
                    self.string_data_downloader.download_instrument_history_string(
                        parameters=paged_parameters,
                        moment_from=paged_moment_from,
                        moment_to=paged_moment_to)
            except DownloadError as ex:
                raise SourceDownloadError(f"Download error {ex} for parameters '{paged_parameters}', "
                                          f"moment from '{moment_from.isoformat()}', "
                                          f"moment to '{moment_to.isoformat()}'") from ex

            self.history_values_parser.download_parameters = paged_parameters
            try:
                values_providers = self.history_values_parser.parse(
                    history_data_string_result.downloaded_string,
                    moment_from.tzinfo)
                all_values = ((value_provider.get_instrument_value(moment_from.tzinfo), value_provider)
                              for value_provider
                              in values_providers)
                # Filter out values outside the requested interval.
                value_providers = (value_provider
                                   for value, value_provider
                                   in all_values
                                   if moment_from <= value.moment <= moment_to)
                yield from value_providers

            except InstrumentValuesHistoryEmpty:
                # history data exhausted
                history_data_string_result.set_correctness(True)
                return

            except Exception:
                # give the downloader a chance to drop the bad cached string
                history_data_string_result.set_correctness(False)
                raise

            history_data_string_result.set_correctness(True)
class GenericInstrumentsInfoExporter(InstrumentsInfoExporter):
    """ Generic, used by default, instrument info exporter.
    """
    max_paged_parameters = 10000  # limit of paged parameters

    def __init__(
            self,
            string_data_downloader: InstrumentStringDataDownloader,
            info_parser: InstrumentInfoParser):
        """ Initialize exporter.

        :param string_data_downloader: Used instrument string data downloader.
        :param info_parser: Used instrument info parser.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.string_data_downloader = string_data_downloader
        self.info_parser = info_parser

    def export_instruments_info(self, parameters) -> typing.Iterator[InstrumentInfoProvider]:
        """ Download, parse and yield instruments info.

        :param parameters: Source-specific info download parameters.
        :return: Generator of instrument info providers.
        :raises MaxPagesLimitExceeded: If pagination exceeds `max_paged_parameters`.
        :raises SourceDownloadError: If download of some page failed.
        """
        self.logger.info(f"Begin to export instruments info "
                         f"by parameters: {parameters}")

        paged_parameters_index = 0
        for paged_parameters in self.string_data_downloader.paginate_download_instruments_info_parameters(
                parameters=parameters):
            self.logger.info(f"Begin to export instrument info "
                             f"by paged parameters: {paged_parameters}")

            paged_parameters_index += 1
            # FIX: was `>=`, which raised on page number `max_paged_parameters`
            # itself and thus effectively allowed only (max_paged_parameters - 1) pages.
            if paged_parameters_index > self.max_paged_parameters:
                raise MaxPagesLimitExceeded(self.max_paged_parameters)

            try:
                info_data_string_result = \
                    self.string_data_downloader.download_instruments_info_string(parameters=paged_parameters)
            except DownloadError as ex:
                raise SourceDownloadError(f"Download error {ex} for parameters '{paged_parameters}'") from ex

            self.info_parser.download_parameters = paged_parameters
            try:
                info_providers = self.info_parser.parse(info_data_string_result.downloaded_string)
                yield from info_providers

            except InstrumentInfoEmpty:
                # info data exhausted
                info_data_string_result.set_correctness(True)
                return

            except Exception:
                # give the downloader a chance to drop the bad cached string
                info_data_string_result.set_correctness(False)
                raise

            info_data_string_result.set_correctness(True)
# Global registry of all available sources and their factories,
# keyed by the exporter factory class.
# Populated via register_instrument_history_values_exporter().
_GlobalInstrumentExporterRegistry: \
    typing.Dict[typing.Type[InstrumentExporterFactory], InstrumentExporterRegistry] = {}
def register_instrument_history_values_exporter(registry: InstrumentExporterRegistry):
""" Register instrument data exporter. Validate `registry` and store it in internal global cache.
:param registry: Instrument exporter to register.
"""
if registry is None:
raise ValueError("'registry' is None")
if not isinstance(registry, InstrumentExporterRegistry):
raise TypeError(f"'registry' is not {InstrumentExporterRegistry.__name__}: {registry!r}")
if registry.factory is None:
raise ValueError("'registry.factory' is None")
if not isinstance(registry.factory, InstrumentExporterFactory):
raise TypeError(f"'registry.factory' is not {InstrumentExporterFactory.__name__}: {registry.factory!r}")
if registry.factory.__class__ in _GlobalInstrumentExporterRegistry:
raise ValueError(f"Factory {registry.factory!r} already registered")
if registry.name is None:
raise ValueError("'registry.name' is None")
if not str(registry.name).strip():
raise ValueError("'registry.name' is empty or whitespace only")
_GlobalInstrumentExporterRegistry[registry.factory.__class__] = registry
def get_all_instrument_exporters() -> typing.Tuple[InstrumentExporterRegistry, ...]:
    """ Get all available (registered) instrument data exporters.

    :return: Tuple of all available (registered) instrument data exporters
    """
    registries = _GlobalInstrumentExporterRegistry.values()
    return tuple(registries)
def get_instrument_exporter_by_factory(
        factory: typing.Union[typing.Type[InstrumentExporterFactory], InstrumentExporterFactory]) \
        -> typing.Optional[InstrumentExporterRegistry]:
    """ Find registered instrument data exporter by its factory (instance or class) and return it.

    :param factory: Factory instance or class of registered data exporter.
    :return: ``None`` if not found.
    """
    # The registry is keyed by factory class, so normalize an instance to its class.
    factory_class = factory if inspect.isclass(factory) else factory.__class__
    return _GlobalInstrumentExporterRegistry.get(factory_class, None)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/generic.py
| 0.679179 | 0.190536 |
generic.py
|
pypi
|
import dataclasses
import datetime
import decimal
import enum
import typing
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS, Volatile
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class HistoryFieldNames(enum.Enum):
    """ Field names in history JSON.

    Keys expected in the S&P Dow Jones index history JSON payload;
    values are the literal JSON keys.
    """
    RETURN_TYPE_HOLDER = 'idsIndexReturnTypeHolder'
    RETURN_TYPE_CODE = 'returnTypeCode'
    RETURN_TYPE_NAME = 'returnTypeName'
    CURRENCY_HOLDER = 'idsIndexCurrencyHolder'
    CURRENCY_CODE = 'currencyCode'
    DETAIL_HOLDER = 'indexDetailHolder'
    LEVELS_HOLDER = 'indexLevelsHolder'
    LEVELS = 'indexLevels'
    EFFECTIVE_DATE = 'effectiveDate'
    INDEX_ID = 'indexId'
    INDEX_VALUE = 'indexValue'
    SERVICE_MESSAGES = 'serviceMessages'
    STATUS = 'status'
class InfoFieldNames(enum.Enum):
    """ Field names in info JSON.

    Keys expected in the S&P Dow Jones index info JSON payload;
    values are the literal JSON keys.
    """
    PAGINATION = 'pagination'
    PAGE_SIZE = 'pageSize'
    START_PAGE_INDEX = 'startPageIndex'
    TOTAL_PAGES = 'totalPages'
    RESPONSE = 'response'
    INDEX_ID = 'indexId'
    INDEX_NAME = 'indexName'
    URL_TITLE = 'urlTitle'
class IndexFinderFilterGroup(typing.NamedTuple):
    """ Index finder filter group
    """
    name: str
    label: str

    @classmethod
    def safe_create(
            cls: typing.Type['IndexFinderFilterGroup'],
            *,
            name: str,
            label: str) -> 'IndexFinderFilterGroup':
        """ Create new instance of ``IndexFinderFilterGroup`` with arguments check.

        :param name: Name of filter group.
        :param label: Label for filter group.
        :return: ``IndexFinderFilterGroup`` instance.
        """
        # Coerce both fields to plain strings.
        name_str = str(name)
        label_str = str(label)
        return cls(name=name_str, label=label_str)
class IndexFinderFilter(typing.NamedTuple):
    """ Index finder filter
    """
    group: IndexFinderFilterGroup
    label: str
    value: str

    @classmethod
    def safe_create(
            cls: typing.Type['IndexFinderFilter'],
            *,
            group: IndexFinderFilterGroup,
            label: str,
            value: str) -> 'IndexFinderFilter':
        """ Create new instance of ``IndexFinderFilter`` with arguments check.

        :param group: Index finder filter group.
        :param label: Label for filter parameter.
        :param value: Value of filter parameter.
        :return: ``IndexFinderFilter`` instance.
        """
        if not isinstance(group, IndexFinderFilterGroup):
            raise TypeError("'group' is not IndexFinderFilterGroup")

        # Group is kept as-is; label and value are coerced to strings.
        return cls(group=group, label=str(label), value=str(value))
class Currency(typing.NamedTuple):
    """ Index currency.
    """
    currency_code: str

    @classmethod
    def safe_create(
            cls: typing.Type['Currency'],
            *,
            currency_code: str) -> 'Currency':
        """ Create new instance of ``Currency`` with arguments check.

        :param currency_code: Currency code.
        :return: ``Currency`` instance.
        """
        code = str(currency_code)
        return cls(currency_code=code)
class ReturnType(typing.NamedTuple):
    """ Index return type.

    For example price return, total return or net total return.
    """
    return_type_code: str
    return_type_name: str

    @classmethod
    def safe_create(
            cls: typing.Type['ReturnType'],
            *,
            return_type_code: str,
            return_type_name: str) -> 'ReturnType':
        """ Create new instance of ``ReturnType`` with arguments check.

        :param return_type_code: Return type code.
        :param return_type_name: Return type name.
        :return: ``ReturnType`` instance.
        """
        code = str(return_type_code)
        name = str(return_type_name)
        return cls(return_type_code=code, return_type_name=name)
class IndexMetaData(typing.NamedTuple):
    """ Container for index meta data
    """
    # Available index currencies.
    currencies: typing.Tuple[Currency, ...]
    # Available index return types (e.g. price, total, net total return).
    return_types: typing.Tuple[ReturnType, ...]
    # Available index finder filters.
    index_finder_filters: typing.Tuple[IndexFinderFilter, ...]
@dataclasses.dataclass
class IndexLevel(InstrumentValueProvider):
    """ Container for index history value.
    """
    index_id: str
    effective_date: datetime.datetime
    index_value: decimal.Decimal

    def __init__(self,
                 *,
                 index_id: str,
                 effective_date: datetime.datetime,
                 index_value: decimal.Decimal):
        """ Initialize index level.

        :param index_id: Index ID.
        :param effective_date: Effective date of the level.
        :param index_value: Value of index.
        :raises TypeError: If `effective_date` is not ``datetime.datetime``.
        """
        if not isinstance(effective_date, datetime.datetime):
            raise TypeError("'effective_date' is not datetime")

        # FIX: coerce index_id to str to match the declared field type
        # (consistent with IndexInfo.__init__ in this module).
        self.index_id = str(index_id)
        self.effective_date = effective_date
        self.index_value = decimal.Decimal(index_value)

    def __str__(self):
        return (f"S&P Dow Jones index level ("
                f"index_id={self.index_id}, "
                f"effective_date={self.effective_date.isoformat()}, "
                f"index_value={self.index_value})")

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert to ``InstrumentValue`` with the moment in the target timezone. """
        moment = self.effective_date.astimezone(tzinfo)
        return InstrumentValue(value=self.index_value, moment=moment)
@dataclasses.dataclass
class IndexInfo(InstrumentInfoProvider):
    """ Container for index information.
    """
    index_id: str
    index_name: str
    url: str

    def __init__(self, *, index_id: str, index_name: str, url: str):
        # Coerce every field to a plain string.
        self.index_id = str(index_id)
        self.index_name = str(index_name)
        self.url = str(url)

    def __str__(self):
        return (f"S&P Dow Jones index ("
                f"index_id={self.index_id}, "
                f"index_name={self.index_name}, "
                f"url={self.url})")

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic instrument info built from this index info. """
        return InstrumentInfo(code=self.index_id, name=self.index_name)
class SpdjIndexesInfoDownloadParameters(typing.NamedTuple):
    """ Container for ``SpdjStringDataDownloader.download_instruments_info_string parameters``.
    """
    page_number: Annotated[int, Volatile(generator=lambda ctx: 1, stub_value=1)]
    index_finder_filter: IndexFinderFilter = None

    @classmethod
    def safe_create(
            cls: typing.Type['SpdjIndexesInfoDownloadParameters'],
            *,
            page_number: int,
            index_finder_filter: IndexFinderFilter = None) -> 'SpdjIndexesInfoDownloadParameters':
        """ Create new instance of ``SpdjIndexesInfoDownloadParameters`` with arguments check.

        :param page_number: Number of page to download.
        :param index_finder_filter: Index finder filters or ``None``.
        :return: ``SpdjIndexesInfoDownloadParameters`` instance.
        """
        # A filter is optional; when present it must be of the proper type.
        has_filter = index_finder_filter is not None
        if has_filter and not isinstance(index_finder_filter, IndexFinderFilter):
            raise TypeError("'index_finder_filter' is not IndexFinderFilter")

        return cls(
            page_number=int(page_number),
            index_finder_filter=index_finder_filter)
@dataclasses.dataclass
class SpdjIndexHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``SpdjStringDataDownloader.download_instrument_history_string parameters``.
    """
    index_id: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]
    currency: Currency
    return_type: ReturnType

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[SpdjIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'SpdjIndexHistoryDownloadParameters':
        """ Clone this instance, replacing attributes from info parameters and instrument info. """
        return SpdjIndexHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['SpdjIndexHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['SpdjIndexHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[SpdjIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'SpdjIndexHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
                 `info_download_parameters` and `instrument_info`.
        """
        # Instrument info wins for index_id; otherwise fall back to the old parameters.
        if instrument_info is not None:
            new_index_id = instrument_info.index_id
        elif history_download_parameters is not None:
            new_index_id = history_download_parameters.index_id
        else:
            new_index_id = None

        # Currency and return type only ever come from the old parameters.
        new_currency = None
        new_return_type = None
        if history_download_parameters is not None:
            new_currency = history_download_parameters.currency
            new_return_type = history_download_parameters.return_type

        return cls(
            index_id=new_index_id,
            currency=new_currency,
            return_type=new_return_type)

    @classmethod
    def safe_create(
            cls: typing.Type['SpdjIndexHistoryDownloadParameters'],
            *,
            index_id: str,
            currency: Currency,
            return_type: ReturnType) -> 'SpdjIndexHistoryDownloadParameters':
        """ Create new instance of ``SpdjIndexHistoryDownloadParameters`` with arguments check.

        :param index_id: Index code.
        :param currency: Currency.
        :param return_type: Return type.
        :return: ``SpdjIndexHistoryDownloadParameters`` instance.
        """
        if not isinstance(currency, Currency):
            raise TypeError(f"'currency' is not Currency: {currency!r}")
        if not isinstance(return_type, ReturnType):
            raise TypeError(f"'return_type' is not ReturnType: {return_type!r}")

        return cls(
            index_id=str(index_id),
            currency=currency,
            return_type=return_type)
class SpdjDownloadParametersFactory(DownloadParametersFactory):
    """ Download parameters factories and generators for S&P Dow Jones.
    """

    @property
    def download_history_parameters_class(self) -> typing.Type[SpdjIndexHistoryDownloadParameters]:
        """ Class of history download parameters. """
        return SpdjIndexHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., SpdjIndexHistoryDownloadParameters]:
        """ Checked factory callable for history download parameters. """
        return SpdjIndexHistoryDownloadParameters.safe_create

    @property
    def download_info_parameters_class(self) -> typing.Type[SpdjIndexesInfoDownloadParameters]:
        """ Class of info download parameters. """
        return SpdjIndexesInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Checked factory callable for info download parameters. """
        return SpdjIndexesInfoDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[SpdjIndexHistoryDownloadParameters],
            info_download_parameters: typing.Optional[SpdjIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> SpdjIndexHistoryDownloadParameters:
        """ Delegate to ``SpdjIndexHistoryDownloadParameters.generate_from``. """
        return SpdjIndexHistoryDownloadParameters.generate_from(
            history_download_parameters,
            info_download_parameters,
            instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/spdji/v2021/meta.py
| 0.755276 | 0.182225 |
meta.py
|
pypi
|
import decimal
import datetime
import enum
import typing
import dataclasses
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class IndexHistoryTypes(enum.Enum):
    """ History type (period).

    Values are the literal tokens used by the Solactive source.
    """
    INTRADAY = 'intraday'
    MAX = 'max'
class FileExtensions(enum.Enum):
    """ File extension.
    """
    JSON = '.json'
class FieldNames(enum.Enum):
    """ Field names in JSON.

    Values are the literal JSON keys.
    """
    INDEX_ID = 'indexId'
    TIMESTAMP = 'timestamp'
    VALUE = 'value'
@dataclasses.dataclass
class IndexValue(InstrumentValueProvider):
    """ Container for index history item.
    """
    index_id: str
    moment: datetime.datetime
    value: decimal.Decimal

    def __init__(self, *, index_id: str, moment: datetime.datetime, value: decimal.Decimal):
        """ Initialize index value.

        :param index_id: Index ID
        :param moment: Moment.
        :param value: Value of index.
        """
        if not isinstance(moment, datetime.datetime):
            raise TypeError("'moment' is not datetime")

        # Coerce fields to their declared types.
        self.index_id = str(index_id)
        self.moment = moment
        self.value = decimal.Decimal(value)

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert to ``InstrumentValue`` with the moment in the target timezone. """
        adjusted_moment = self.moment.astimezone(tzinfo)
        return InstrumentValue(value=self.value, moment=adjusted_moment)
@dataclasses.dataclass
class IndexInfo(InstrumentInfoProvider):
    """ Container for index information.
    """
    isin: str
    name: str

    def __init__(self, *, isin: str, name: str):
        # Coerce both fields to plain strings.
        self.isin = str(isin)
        self.name = str(name)

    def __str__(self):
        return (f"Solactive index ("
                f"isin={self.isin}, "
                f"name={self.name})")

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic instrument info built from this index info. """
        return InstrumentInfo(code=self.isin, name=self.name)
class SolactiveIndexesInfoDownloadParameters(typing.NamedTuple):
    """ Container for ``SolactiveStringDataDownloader.download_instruments_info_string`` parameters.

    The Solactive info request takes no parameters, hence the empty tuple.
    """

    @classmethod
    def safe_create(
            cls: typing.Type['SolactiveIndexesInfoDownloadParameters']) -> 'SolactiveIndexesInfoDownloadParameters':
        """ Create new instance of ``SolactiveIndexesInfoDownloadParameters`` with arguments check.

        :return: ``SolactiveIndexesInfoDownloadParameters`` instance.
        """
        return cls()
@dataclasses.dataclass
class SolactiveIndexHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``SolactiveStringDataDownloader.download_instrument_history_string`` parameters.
    """
    # ISIN identifies the instrument for the inspection machinery
    isin: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[SolactiveIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'SolactiveIndexHistoryDownloadParameters':
        """ Delegate to :meth:`generate_from` with ``self`` as the source of defaults. """
        return SolactiveIndexHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['SolactiveIndexHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['SolactiveIndexHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[SolactiveIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'SolactiveIndexHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
                 `info_download_parameters` and `instrument_info`.
        """
        # instrument_info wins over history_download_parameters; fall back to None
        if instrument_info is not None:
            isin = instrument_info.isin
        elif history_download_parameters is not None:
            isin = history_download_parameters.isin
        else:
            isin = None
        return cls(isin=isin)

    @classmethod
    def safe_create(
            cls: typing.Type['SolactiveIndexHistoryDownloadParameters'],
            *,
            isin: str) -> 'SolactiveIndexHistoryDownloadParameters':
        """ Create new instance of ``SolactiveIndexHistoryDownloadParameters`` with arguments check.

        :param isin: ISIN.
        :return: ``SolactiveIndexHistoryDownloadParameters`` instance.
        """
        return cls(isin=str(isin))
class SolactiveDownloadParametersFactory(DownloadParametersFactory):
    """ Download parameters factories and generators for Solactive.
    """

    @property
    def download_history_parameters_class(self) -> typing.Type[SolactiveIndexHistoryDownloadParameters]:
        """ Class of history download parameters. """
        return SolactiveIndexHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., SolactiveIndexHistoryDownloadParameters]:
        """ Checked factory callable for history download parameters. """
        return SolactiveIndexHistoryDownloadParameters.safe_create

    @property
    def download_info_parameters_class(self):
        """ Class of info download parameters. """
        return SolactiveIndexesInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Checked factory callable for info download parameters. """
        return SolactiveIndexesInfoDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[SolactiveIndexHistoryDownloadParameters],
            info_download_parameters: typing.Optional[SolactiveIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> SolactiveIndexHistoryDownloadParameters:
        """ Delegate to ``SolactiveIndexHistoryDownloadParameters.generate_from``. """
        return SolactiveIndexHistoryDownloadParameters.generate_from(
            history_download_parameters, info_download_parameters, instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/solactive/v2018/meta.py
| 0.761006 | 0.224565 |
meta.py
|
pypi
|
import datetime
import logging
import typing
import urllib.parse
from .meta import (
IntervalTypes, YahooInstrumentInfoDownloadParameters, YahooInstrumentHistoryDownloadParameters,
YahooDownloadParametersFactory)
from .parsers import YahooQuotesJsonParser, YahooInstrumentInfoParser
from ...base import (
ApiActualityChecker, InstrumentStringDataDownloader, InstrumentExporterFactory,
InstrumentHistoryValuesExporter, InstrumentsInfoExporter,
DownloadParameterValuesStorage, ParseError,
CheckApiActualityError)
from ...generic import GenericInstrumentHistoryValuesExporter, GenericInstrumentsInfoExporter
from ....communication.downloader import Downloader, DownloadStringResult
logging.getLogger().addHandler(logging.NullHandler())
class YahooFinanceStringDataDownloader(InstrumentStringDataDownloader):
    """ Data downloader from https://finance.yahoo.com/.
    """

    quotes_history_url = 'https://query2.finance.yahoo.com/v8/finance/chart/'
    search_url = 'https://query2.finance.yahoo.com/v1/finance/search'

    def __init__(self, downloader: Downloader):
        """ Initialize Yahoo Finance string data downloader.

        :param downloader: Low-level downloader used for all HTTP requests.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.downloader = downloader
        # headers HTTP
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/39.0.2171.95 Safari/537.36'
        }
        # query parameters
        self.instrument_history_interval_type = IntervalTypes.ONE_DAY

    def download_instrument_history_string(
            self,
            parameters: YahooInstrumentHistoryDownloadParameters,
            moment_from: datetime.datetime,
            moment_to: datetime.datetime) -> DownloadStringResult:
        """ Download history for the instrument identified by ``parameters.symbol``. """
        return self.download_quotes_string(parameters.symbol, moment_from, moment_to)

    def download_instruments_info_string(
            self,
            parameters: YahooInstrumentInfoDownloadParameters) -> DownloadStringResult:
        """ Download search results for ``parameters.search_string``. """
        return self.download_instruments_search_string(parameters.search_string)

    def download_quotes_string(
            self,
            symbol: str,
            moment_from: datetime.datetime,
            moment_to: datetime.datetime) -> DownloadStringResult:
        """ Downloads data for one instrument as string.

        :param symbol: Instrument symbol.
        :param moment_from: Download interval beginning.
        :param moment_to: Download interval ending.
        :return: Container with downloaded string.
        """
        # Yahoo expects UNIX timestamps; compute seconds since the epoch,
        # taking the epoch in the same timezone as the requested moments.
        epoch = datetime.datetime(1970, 1, 1, tzinfo=moment_from.tzinfo)
        self.downloader.parameters = [
            ('interval', self.instrument_history_interval_type.value),
            ('period1', int((moment_from - epoch).total_seconds())),
            ('period2', int((moment_to - epoch).total_seconds()))
        ]
        self.downloader.headers = dict(self.headers)  # pass a copy, not our own dict
        return self.downloader.download_string(self.quotes_history_url + urllib.parse.quote(str(symbol)))

    def download_instruments_search_string(self, search_string: str) -> DownloadStringResult:
        """ Downloads string with the list of all found instruments.

        :param search_string: String to search.
        :return: Container with downloaded string.
        """
        self.downloader.parameters = [('q', str(search_string))]
        self.downloader.headers = dict(self.headers)  # pass a copy, not our own dict
        return self.downloader.download_string(self.search_url)
class YahooFinanceApiActualityChecker(ApiActualityChecker):
    """ Verifies actuality and accessibility of REST API of finance.yahoo.com.
    """

    actuality_check_symbol = '^GSPC'  # S&P 500

    def __init__(self,
                 string_data_downloader: YahooFinanceStringDataDownloader,
                 json_parser: YahooQuotesJsonParser):
        """ Initialize checker.

        :param string_data_downloader: Downloader of raw quote strings.
        :param json_parser: Parser of quote history JSON.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.string_data_downloader = string_data_downloader
        self.json_parser = json_parser

    def check(self):
        """ Download a minimal history interval for a well-known symbol and try to parse it.

        :raises CheckApiActualityError: If the downloaded JSON cannot be parsed.
        """
        expected_symbol = self.actuality_check_symbol
        self.logger.info(f"Check actuality via {expected_symbol!r}")
        # Use an aware UTC moment: datetime.utcnow() is deprecated since Python 3.12
        # and returns a naive value.  The downloader builds its epoch from the
        # moment's tzinfo, so the resulting request parameters are the same.
        now = datetime.datetime.now(datetime.timezone.utc)
        str_data_result = self.string_data_downloader.download_quotes_string(
            symbol=expected_symbol,
            moment_from=now,
            moment_to=now
        )
        self.logger.debug(f"Got str data: {str_data_result.downloaded_string}")
        try:
            # only successful parsing matters here; the parsed values are discarded
            _ = list(self.json_parser.parse(str_data_result.downloaded_string, tzinfo=None))
        except ParseError as ex:
            str_data_result.set_correctness(False)
            raise CheckApiActualityError(f"Unexpected instrument history JSON: {ex.message}") from ex
        except Exception:
            # mark the downloaded string as bad (e.g. to drop it from cache), then re-raise
            str_data_result.set_correctness(False)
            raise
        self.logger.info("Actuality check was successful")
class YahooFinanceDownloadParameterValuesStorage(DownloadParameterValuesStorage):
    """ Storage of instruments download parameters.

    Yahoo Finance defines no dynamic enum types, so every method is a stub.
    """

    def is_dynamic_enum_type(self, cls: type) -> bool:
        """ Always ``False``: this source has no dynamic enum types. """
        return False

    def get_all_managed_types(self) -> typing.Iterable[typing.Type]:
        """ Always empty: this source manages no types. """
        return ()

    def get_dynamic_enum_key(self, instance):
        """ Always ``None``: this source has no dynamic enum keys. """
        return None
class YahooFinanceExporterFactory(InstrumentExporterFactory):
    """ Factory class for create instances of Yahoo Finance data exporter.
    """
    name: str = 'Yahoo Finance data exporter. Version 8.'
    provider_site: str = 'https://finance.yahoo.com/'

    def __init__(self):
        # lazily created singletons, see the properties below
        self._dynamic_enum_type_manager = None
        self._download_parameters_factory = None

    def create_history_values_exporter(self, downloader: Downloader) -> InstrumentHistoryValuesExporter:
        """ Build a generic history exporter wired with a Yahoo downloader and JSON parser. """
        return GenericInstrumentHistoryValuesExporter(
            YahooFinanceStringDataDownloader(downloader),
            YahooQuotesJsonParser())

    def create_info_exporter(self, downloader: Downloader) -> InstrumentsInfoExporter:
        """ Build a generic info exporter wired with a Yahoo downloader and info parser. """
        return GenericInstrumentsInfoExporter(
            YahooFinanceStringDataDownloader(downloader),
            YahooInstrumentInfoParser())

    def create_download_parameter_values_storage(self, downloader: Downloader) -> DownloadParameterValuesStorage:
        """ Build a (stateless) parameter values storage; ``downloader`` is not used. """
        return YahooFinanceDownloadParameterValuesStorage()

    def create_api_actuality_checker(self, downloader: Downloader) -> YahooFinanceApiActualityChecker:
        """ Build an actuality checker over a fresh downloader/parser pair. """
        return YahooFinanceApiActualityChecker(
            YahooFinanceStringDataDownloader(downloader),
            YahooQuotesJsonParser())

    @property
    def dynamic_enum_type_manager(self) -> YahooFinanceDownloadParameterValuesStorage:
        """ Lazily created dynamic enum type manager (singleton per factory). """
        if self._dynamic_enum_type_manager is None:
            self._dynamic_enum_type_manager = YahooFinanceDownloadParameterValuesStorage()
        return self._dynamic_enum_type_manager

    @property
    def download_parameters_factory(self) -> YahooDownloadParametersFactory:
        """ Lazily created download parameters factory (singleton per factory). """
        if self._download_parameters_factory is None:
            self._download_parameters_factory = YahooDownloadParametersFactory()
        return self._download_parameters_factory
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/yahoo/v8/exporters.py
| 0.624866 | 0.195863 |
exporters.py
|
pypi
|
import decimal
import datetime
import enum
import typing
import dataclasses
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class IntervalTypes(enum.Enum):
    """ History interval type.

    The member value is the token passed in the chart API ``interval`` query parameter.
    """
    ONE_MINUTE = '1m'
    TWO_MINUTES = '2m'
    FIVE_MINUTES = '5m'
    FIFTEEN_MINUTES = '15m'
    THIRTY_MINUTES = '30m'
    SIXTY_MINUTES = '60m'
    NINETY_MINUTES = '90m'
    ONE_HOUR = '1h'
    ONE_DAY = '1d'
    FIVE_DAYS = '5d'
    ONE_WEEK = '1wk'
    ONE_MONTH = '1mo'
    THREE_MONTHS = '3mo'
class SearchInfoFieldNames(enum.Enum):
    """ Field names in JSON from search result.

    Member values mirror the keys of the Yahoo Finance search response document.
    """
    FINANCE = 'finance'
    ERROR = 'error'
    ERROR_CODE = 'code'  # presumably nested inside the 'error' object — verify against parser
    ERROR_DESCRIPTION = 'description'
    QUOTES = 'quotes'
    SYMBOL = 'symbol'
    EXCHANGE = 'exchange'
    SHORT_NAME = 'shortname'
    LONG_NAME = 'longname'
    TYPE_DISP = 'typeDisp'
    EXCHANGE_DISP = 'exchDisp'
    IS_YAHOO_FINANCE = 'isYahooFinance'
class QuoteHistoryFieldNames(enum.Enum):
    """ Field names in JSON from quote history request.

    Member values mirror the keys of the Yahoo Finance chart (v8) response document.
    """
    CHART = 'chart'
    ERROR = 'error'
    ERROR_CODE = 'code'  # presumably nested inside the 'error' object — verify against parser
    ERROR_DESCRIPTION = 'description'
    RESULT = 'result'
    META = 'meta'
    SYMBOL = 'symbol'
    TIMESTAMP = 'timestamp'
    INDICATORS = 'indicators'
    QUOTE = 'quote'
    CLOSE = 'close'
@dataclasses.dataclass
class InstrumentQuoteValue(InstrumentValueProvider):
    """ One point of Yahoo instrument history: symbol, moment and close value.
    """
    symbol: str
    timestamp: datetime.datetime
    close: decimal.Decimal

    def __init__(self, *, symbol: str, timestamp: datetime.datetime, close: decimal.Decimal):
        """ Initialize Yahoo instrument value.

        :param symbol: Yahoo instrument symbol.
        :param timestamp: Moment of the value.
        :param close: Close value of instrument.
        """
        # fail fast on a non-datetime moment before storing anything
        if not isinstance(timestamp, datetime.datetime):
            raise TypeError("'timestamp' is not datetime")
        self.timestamp = timestamp
        self.close = decimal.Decimal(close)
        self.symbol = str(symbol)

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert to generic ``InstrumentValue`` with the moment shifted into ``tzinfo``. """
        adjusted = self.timestamp.astimezone(tzinfo)
        return InstrumentValue(value=self.close, moment=adjusted)
@dataclasses.dataclass
class InstrumentQuoteInfo(InstrumentInfoProvider):
    """ Static information about one Yahoo Finance instrument (one search hit).
    """
    symbol: str
    exchange: str
    short_name: str
    long_name: typing.Optional[str]
    type_disp: typing.Optional[str]
    exchange_disp: typing.Optional[str]
    is_yahoo_finance: bool

    def __init__(
            self,
            *,
            symbol: str,
            exchange: str,
            short_name: str,
            long_name: typing.Optional[str],
            type_disp: typing.Optional[str],
            exchange_disp: typing.Optional[str],
            is_yahoo_finance: bool):
        # optional attributes keep None, everything else is normalized
        def _opt_str(value):
            return None if value is None else str(value)

        self.symbol = str(symbol)
        self.exchange = str(exchange)
        self.short_name = str(short_name)
        self.long_name = _opt_str(long_name)
        self.type_disp = _opt_str(type_disp)
        self.exchange_disp = _opt_str(exchange_disp)
        self.is_yahoo_finance = bool(is_yahoo_finance)

    def __str__(self):
        return (f"Yahoo Finance quote ("
                f"symbol={self.symbol}, "
                f"exchange={self.exchange}, "
                f"short_name={self.short_name}, "
                f"long_name={self.long_name}, "
                f"type_disp={self.type_disp}, "
                f"exchange_disp={self.exchange_disp}, "
                f"is_yahoo_finance={self.is_yahoo_finance})")

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic info: symbol as code, short name as display name. """
        return InstrumentInfo(code=self.symbol, name=self.short_name)
class YahooInstrumentInfoDownloadParameters(typing.NamedTuple):
    """ Container for ``YahooStringDataDownloader.download_instruments_info_string`` parameters.
    """
    # text sent to the Yahoo Finance search endpoint
    search_string: str

    @classmethod
    def safe_create(
            cls: typing.Type['YahooInstrumentInfoDownloadParameters'],
            *,
            search_string: str) -> 'YahooInstrumentInfoDownloadParameters':
        """ Create new instance of ``YahooInstrumentInfoDownloadParameters`` with arguments check.

        :param search_string: Search string.
        :return: ``YahooInstrumentInfoDownloadParameters`` instance.
        """
        normalized = str(search_string)
        return cls(search_string=normalized)
@dataclasses.dataclass
class YahooInstrumentHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``YahooStringDataDownloader.download_instrument_history_string`` parameters.
    """
    # symbol identifies the instrument for the inspection machinery
    symbol: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[YahooInstrumentInfoDownloadParameters],
            instrument_info: typing.Optional[InstrumentQuoteInfo]) -> 'YahooInstrumentHistoryDownloadParameters':
        """ Delegate to :meth:`generate_from` with ``self`` as the source of defaults. """
        return YahooInstrumentHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['YahooInstrumentHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['YahooInstrumentHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[YahooInstrumentInfoDownloadParameters],
            instrument_info: typing.Optional[InstrumentQuoteInfo]) -> 'YahooInstrumentHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
                 `info_download_parameters` and `instrument_info`.
        """
        # instrument_info wins over history_download_parameters; fall back to None
        if instrument_info is not None:
            symbol = instrument_info.symbol
        elif history_download_parameters is not None:
            symbol = history_download_parameters.symbol
        else:
            symbol = None
        return cls(symbol=symbol)

    @classmethod
    def safe_create(
            cls: typing.Type['YahooInstrumentHistoryDownloadParameters'],
            *,
            symbol: str) -> 'YahooInstrumentHistoryDownloadParameters':
        """ Create new instance of ``YahooInstrumentHistoryDownloadParameters`` with arguments check.

        :param symbol: Instrument symbol.
        :return: ``YahooInstrumentHistoryDownloadParameters`` instance.
        """
        return cls(symbol=str(symbol))
class YahooDownloadParametersFactory(DownloadParametersFactory):
    """ Download parameters factories and generators for Yahoo Finance.
    """

    @property
    def download_history_parameters_class(self) -> typing.Type[YahooInstrumentHistoryDownloadParameters]:
        """ Class of history download parameters. """
        return YahooInstrumentHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., YahooInstrumentHistoryDownloadParameters]:
        """ Checked factory callable for history download parameters. """
        return YahooInstrumentHistoryDownloadParameters.safe_create

    @property
    def download_info_parameters_class(self):
        """ Class of info download parameters. """
        return YahooInstrumentInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Checked factory callable for info download parameters. """
        return YahooInstrumentInfoDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[YahooInstrumentHistoryDownloadParameters],
            info_download_parameters: typing.Optional[YahooInstrumentInfoDownloadParameters],
            instrument_info: typing.Optional[InstrumentQuoteInfo]) -> YahooInstrumentHistoryDownloadParameters:
        """ Delegate to ``YahooInstrumentHistoryDownloadParameters.generate_from``. """
        return YahooInstrumentHistoryDownloadParameters.generate_from(
            history_download_parameters, info_download_parameters, instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/yahoo/v8/meta.py
| 0.684897 | 0.159119 |
meta.py
|
pypi
|
import datetime
import decimal
import logging
import typing
from .meta import (
LbmaPreciousMetalHistoryDownloadParameters, LbmaPreciousMetalInfoDownloadParameters, LbmaDownloadParametersFactory,
PreciousMetals, Currencies)
from .parsers import LbmaHistoryJsonParser, LbmaInfoParser
from ...base import (
ApiActualityChecker, InstrumentStringDataDownloader, ParseError,
InstrumentExporterFactory, InstrumentHistoryValuesExporter, InstrumentsInfoExporter,
DownloadParameterValuesStorage, DownloadStringResult,
CheckApiActualityError)
from ...generic import GenericInstrumentHistoryValuesExporter, GenericInstrumentsInfoExporter
from ....communication.downloader import Downloader
logging.getLogger().addHandler(logging.NullHandler())
class LbmaStringDataDownloader(InstrumentStringDataDownloader):
    """ Data downloader from www.lbma.org.uk
    """

    history_base_url = 'https://prices.lbma.org.uk/json/'

    def __init__(self, downloader: Downloader):
        """ Initialize LBMA string data downloader.

        :param downloader: Low-level downloader used for all HTTP requests.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.downloader = downloader
        self.params = []  # LBMA history needs no query parameters
        # headers for HTTP
        self.headers: typing.Dict[str, str] = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/39.0.2171.95 Safari/537.36'
        }

    def download_instrument_history_string(
            self,
            parameters: LbmaPreciousMetalHistoryDownloadParameters,
            moment_from: datetime.datetime,
            moment_to: datetime.datetime) -> DownloadStringResult:
        """ Download full history for one metal; the moment arguments are ignored
        because the LBMA endpoint always returns the whole history.
        """
        return self.download_history_string(parameters.metal)

    def download_instruments_info_string(
            self,
            parameters: LbmaPreciousMetalInfoDownloadParameters) -> DownloadStringResult:
        """ Do nothing, because all instruments info is well known on compile time.

        :param parameters: Source specific instruments info download parameters.
        :return: Empty container.
        """
        return DownloadStringResult(downloaded_string='')

    def download_history_string(
            self,
            metal: PreciousMetals) -> DownloadStringResult:
        """ Downloads history data for one instrument as string.

        :param metal: Precious Metal.
        :return: Container with downloaded string.
        """
        self.downloader.headers = self.headers
        self.downloader.parameters = self.params
        url = f"{self.history_base_url}{metal.value}.json"
        return self.downloader.download_string(url)
class LbmaDownloadParameterValuesStorage(DownloadParameterValuesStorage):
    """ Storage of instrument download parameters.

    LBMA defines no dynamic enum types, so every method is a stub.
    """

    def is_dynamic_enum_type(self, cls: type) -> bool:
        """ Always ``False``: this source has no dynamic enum types. """
        return False

    def get_all_managed_types(self) -> typing.Iterable[typing.Type]:
        """ Always empty: this source manages no types. """
        return ()

    def get_dynamic_enum_key(self, instance):
        """ Always ``None``: this source has no dynamic enum keys. """
        return None
class LbmaApiActualityChecker(ApiActualityChecker):
    """ Verifies actuality and accessibility of REST API of www.lbma.org.uk
    """

    # a well-known historical fixing used as a canary
    _metal_to_check = PreciousMetals.GOLD_AM
    _date_to_check = datetime.date(1968, 1, 2)
    _currency_to_check = Currencies.USD
    _expected_value = decimal.Decimal('35.18')

    def __init__(
            self,
            string_data_downloader: LbmaStringDataDownloader,
            history_values_parser: LbmaHistoryJsonParser):
        """ Initialize checker.

        :param string_data_downloader: Downloader of raw history strings.
        :param history_values_parser: Parser of history JSON.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.string_data_downloader = string_data_downloader
        self.history_values_parser = history_values_parser

    def check(self):
        """ Download gold history and verify a known historical value is present.

        :raises CheckApiActualityError: If data cannot be parsed or the known value is absent.
        """
        self.logger.info("Check actuality via history")
        history_download_parameters = LbmaPreciousMetalHistoryDownloadParameters(
            metal=self._metal_to_check,
            currency=self._currency_to_check)
        history_data_string_result = self.string_data_downloader.download_history_string(
            metal=self._metal_to_check)
        self.logger.debug(f"Got history data:\n{history_data_string_result.downloaded_string}")
        # the parser needs the download parameters to pick the right currency column
        self.history_values_parser.download_parameters = history_download_parameters
        try:
            history_data = tuple(self.history_values_parser.parse(
                history_data_string_result.downloaded_string,
                tzinfo=None))
        except ParseError as ex:
            history_data_string_result.set_correctness(False)
            raise CheckApiActualityError(f"Unexpected history data: {ex.message}") from ex
        except Exception:
            history_data_string_result.set_correctness(False)
            raise
        found = any(
            item.date == self._date_to_check and item.value == self._expected_value
            for item in history_data)
        if found:
            self.logger.info("Actuality check was successful")
            return
        history_data_string_result.set_correctness(False)
        raise CheckApiActualityError(
            f"Not found expected history value for {self._metal_to_check!r}")
class LbmaExporterFactory(InstrumentExporterFactory):
    """ Factory class for create instances of LBMA data exporter.
    """
    name: str = 'LBMA. Version 2021'
    # fixed: these two attributes were copy-pasted from the iShares factory
    # (pointing at www.ishares.com) although this factory is for LBMA
    provider_site: str = 'https://www.lbma.org.uk'
    api_url: str = 'https://prices.lbma.org.uk/json/'

    def __init__(self):
        # lazily created singletons, see the properties below
        self._dynamic_enum_type_manager = None
        self._download_parameters_factory = None

    def create_history_values_exporter(self, downloader: Downloader) -> InstrumentHistoryValuesExporter:
        """ Build a generic history exporter wired with an LBMA downloader and JSON parser. """
        string_data_downloader = LbmaStringDataDownloader(downloader)
        history_values_parser = LbmaHistoryJsonParser()
        return GenericInstrumentHistoryValuesExporter(string_data_downloader, history_values_parser)

    def create_info_exporter(self, downloader: Downloader) -> InstrumentsInfoExporter:
        """ Build a generic info exporter wired with an LBMA downloader and info parser. """
        string_data_downloader = LbmaStringDataDownloader(downloader)
        info_parser = LbmaInfoParser()
        return GenericInstrumentsInfoExporter(string_data_downloader, info_parser)

    def create_download_parameter_values_storage(self, downloader: Downloader) -> LbmaDownloadParameterValuesStorage:
        """ Build a (stateless) parameter values storage; ``downloader`` is not used. """
        return LbmaDownloadParameterValuesStorage()

    def create_api_actuality_checker(self, downloader: Downloader) -> LbmaApiActualityChecker:
        """ Build an actuality checker over a fresh downloader/parser pair. """
        string_data_downloader = LbmaStringDataDownloader(downloader)
        history_values_parser = LbmaHistoryJsonParser()
        return LbmaApiActualityChecker(
            string_data_downloader,
            history_values_parser)

    @property
    def dynamic_enum_type_manager(self) -> LbmaDownloadParameterValuesStorage:
        """ Lazily created dynamic enum type manager (singleton per factory). """
        if self._dynamic_enum_type_manager is None:
            self._dynamic_enum_type_manager = LbmaDownloadParameterValuesStorage()
        return self._dynamic_enum_type_manager

    @property
    def download_parameters_factory(self) -> LbmaDownloadParametersFactory:
        """ Lazily created download parameters factory (singleton per factory). """
        if self._download_parameters_factory is None:
            self._download_parameters_factory = LbmaDownloadParametersFactory()
        return self._download_parameters_factory
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/lbma/v2021/exporters.py
| 0.654784 | 0.191687 |
exporters.py
|
pypi
|
import dataclasses
import datetime
import decimal
import enum
import typing
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class HistoryFieldNames(enum.Enum):
    """ History JSON field names (keys of each item in the LBMA prices document).
    """
    DATE = 'd'  # date of the fixing
    VALUE = 'v'  # values array; presumably indexed by Currencies.history_position — verify against parser
class PricePeriods(enum.Enum):
    """ Precious Metal Prices periods (morning/afternoon fixing).
    """
    ANTE_MERIDIEM = 'am'  # morning fixing
    POST_MERIDIEM = 'pm'  # afternoon fixing
class PreciousMetals(enum.Enum):
    """ Precious Metals.

    Each member carries the URL token (``value``), a human-readable
    ``description`` and an optional fixing ``period`` (silver has none).
    """
    def __new__(cls, value: str, description: str, period: typing.Optional[PricePeriods] = None):
        """ Build the enum member and attach ``description`` and ``period`` attributes
        from the member's value tuple.
        """
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        obj.period = None if period is None else PricePeriods(period)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
            f"'{self.value}', '{self.period}'>"
    GOLD_AM = ('gold_am', 'Gold AM', PricePeriods.ANTE_MERIDIEM)
    GOLD_PM = ('gold_pm', 'Gold PM', PricePeriods.POST_MERIDIEM)
    SILVER = ('silver', 'Silver', None)  # silver has a single daily fixing
    PLATINUM_AM = ('platinum_am', 'Platinum AM', PricePeriods.ANTE_MERIDIEM)
    PLATINUM_PM = ('platinum_pm', 'Platinum PM', PricePeriods.POST_MERIDIEM)
    PALLADIUM_AM = ('palladium_am', 'Palladium AM', PricePeriods.ANTE_MERIDIEM)
    PALLADIUM_PM = ('palladium_pm', 'Palladium PM', PricePeriods.POST_MERIDIEM)
class Currencies(enum.Enum):
    """ Precious Metal Prices currencies.

    ``history_position`` is presumably the index of this currency's value inside
    the history 'v' array — verify against the history parser.
    """
    def __new__(cls, value: str, history_position: int):
        """ Build the enum member and attach the ``history_position`` attribute
        from the member's value tuple.
        """
        obj = object.__new__(cls)
        obj._value_ = value
        obj.history_position = int(history_position)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
            f"'{self.value}', '{self.history_position}'>"
    USD = ('USD', 0)
    GBP = ('GBP', 1)
    EUR = ('EUR', 2)
@dataclasses.dataclass
class PreciousMetalPrice(InstrumentValueProvider):
    """ One point of LBMA price history: fixing date and price.
    """
    date: datetime.date
    value: decimal.Decimal

    def __init__(self,
                 *,
                 date: datetime.date,
                 value: decimal.Decimal):
        """ Initialize precious metal price.

        :param date: Fixing date.
        :param value: Price at that date.
        """
        # fail fast on a non-date argument before storing anything
        if not isinstance(date, datetime.date):
            raise TypeError("'date' is not datetime.date")
        self.value = decimal.Decimal(value)
        self.date = date

    def __str__(self):
        return f"LBMA Precious Metal Price(date={self.date.isoformat()}, value={self.value})"

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Value pinned to the very beginning of ``date`` in ``tzinfo``. """
        start_of_day = datetime.datetime.combine(self.date, datetime.time.min, tzinfo)
        return InstrumentValue(value=self.value, moment=start_of_day)
@dataclasses.dataclass
class PreciousMetalInfo(InstrumentInfoProvider):
    """ Static information about one LBMA precious metal instrument.
    """
    metal: PreciousMetals

    def __init__(self,
                 *,
                 metal: PreciousMetals):
        # coerce (and implicitly validate) through the enum constructor
        self.metal = PreciousMetals(metal)  # pylint: disable=no-value-for-parameter

    def __str__(self):
        return f"LBMA Precious Metal(metal={self.metal})"

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic info: enum value as code, its description as display name. """
        return InstrumentInfo(code=self.metal.value, name=self.metal.description)
class LbmaPreciousMetalInfoDownloadParameters(typing.NamedTuple):
    """ Container for ``LbmaStringDataDownloader.download_instruments_info_string`` parameters.

    Listing LBMA instruments needs no parameters, so this tuple is empty;
    it exists only to keep the download-parameters protocol uniform.
    """

    @classmethod
    def safe_create(
            cls: typing.Type['LbmaPreciousMetalInfoDownloadParameters']) -> 'LbmaPreciousMetalInfoDownloadParameters':
        """ Create new instance of ``LbmaPreciousMetalInfoDownloadParameters`` with arguments check.

        :return: ``LbmaPreciousMetalInfoDownloadParameters`` instance.
        """
        # docstring fixed: it previously named ``ISharesInstrumentInfoDownloadParameters``
        # (copy-paste from the iShares source)
        return cls()
@dataclasses.dataclass
class LbmaPreciousMetalHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``LbmaStringDataDownloader.download_instrument_history_string`` parameters.
    """
    # metal identifies the instrument for the inspection machinery
    metal: Annotated[PreciousMetals, InstrumentInfoParameter(instrument_identity=True)]
    currency: Currencies

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[LbmaPreciousMetalInfoDownloadParameters],
            instrument_info: typing.Optional[PreciousMetalInfo]) -> 'LbmaPreciousMetalHistoryDownloadParameters':
        """ Delegate to :meth:`generate_from` with ``self`` as the source of defaults. """
        return LbmaPreciousMetalHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['LbmaPreciousMetalHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['LbmaPreciousMetalHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[LbmaPreciousMetalInfoDownloadParameters],
            instrument_info: typing.Optional[PreciousMetalInfo]) -> 'LbmaPreciousMetalHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
                 `info_download_parameters` and `instrument_info`.
        """
        # metal: instrument_info wins over history_download_parameters; fall back to None
        if instrument_info is not None:
            metal = instrument_info.metal
        elif history_download_parameters is not None:
            metal = history_download_parameters.metal
        else:
            metal = None
        # currency comes only from the history parameters
        currency = None if history_download_parameters is None else history_download_parameters.currency
        return cls(metal=metal, currency=currency)

    @classmethod
    def safe_create(
            cls: typing.Type['LbmaPreciousMetalHistoryDownloadParameters'],
            *,
            metal: PreciousMetals,
            currency: Currencies) -> 'LbmaPreciousMetalHistoryDownloadParameters':
        """ Create new instance of ``LbmaPreciousMetalHistoryDownloadParameters`` with arguments check.

        :param metal: Precious Metal.
        :param currency: Currency.
        :return: ``LbmaPreciousMetalHistoryDownloadParameters`` instance.
        """
        if not isinstance(metal, PreciousMetals):
            raise TypeError(f"'metal' is not PreciousMetals: {metal!r}")
        if not isinstance(currency, Currencies):
            raise TypeError(f"'currency' is not Currencies: {currency!r}")
        return cls(metal=metal, currency=currency)
class LbmaDownloadParametersFactory(DownloadParametersFactory):
    """ Download parameters factories and generators for LBMA.
    """

    @property
    def download_history_parameters_class(self) -> typing.Type[LbmaPreciousMetalHistoryDownloadParameters]:
        """ Class of history download parameters. """
        return LbmaPreciousMetalHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., LbmaPreciousMetalHistoryDownloadParameters]:
        """ Checked factory callable for history download parameters. """
        return LbmaPreciousMetalHistoryDownloadParameters.safe_create

    @property
    def download_info_parameters_class(self):
        """ Class of info download parameters. """
        return LbmaPreciousMetalInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Checked factory callable for info download parameters. """
        return LbmaPreciousMetalInfoDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[LbmaPreciousMetalHistoryDownloadParameters],
            info_download_parameters: typing.Optional[LbmaPreciousMetalInfoDownloadParameters],
            instrument_info: typing.Optional[PreciousMetalInfo]) -> LbmaPreciousMetalHistoryDownloadParameters:
        """ Delegate to ``LbmaPreciousMetalHistoryDownloadParameters.generate_from``. """
        return LbmaPreciousMetalHistoryDownloadParameters.generate_from(
            history_download_parameters, info_download_parameters, instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/lbma/v2021/meta.py
| 0.769254 | 0.171963 |
meta.py
|
pypi
|
import decimal
import typing
import datetime
import enum
import dataclasses
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class RateFrequencies(enum.Enum):
    """ Frequencies of rate changing.

    Each member carries the request token (``value``) and a human-readable
    ``description``.
    """
    def __new__(cls, value: str, description: str):
        """ Build the enum member and attach the ``description`` attribute
        from the member's value tuple.
        """
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
            f"'{self.value}' ('{self.description}')>"
    DAILY = ('0', 'Daily')
    MONTHLY = ('1', 'Monthly')
@dataclasses.dataclass
class CurrencyRateValue(InstrumentValueProvider):
    """ Container for currency rate history item.
    """
    date: datetime.date  # date of the rate
    value: decimal.Decimal  # rate value quoted for ``nominal`` units of currency
    nominal: int  # number of currency units the rate is quoted for
    currency_id: str  # internal CBR currency identifier

    def __init__(self,
                 *,
                 date: datetime.date,
                 value: decimal.Decimal,
                 nominal: int,
                 currency_id: str):
        """ Initialize currency rate item with type coercion of all fields.

        :param date: Date of the rate.
        :param value: Rate value (quoted for ``nominal`` units of currency).
        :param nominal: Number of currency units the rate is quoted for.
        :param currency_id: Currency identifier.
        :raises TypeError: If ``date`` is not a ``datetime.date``.
        """
        if not isinstance(date, datetime.date):
            # fixed message: the checked parameter is named 'date', not 'value_date'
            raise TypeError("'date' is not date")
        self.date = date
        self.value = decimal.Decimal(value)
        self.nominal = int(nominal)
        self.currency_id = str(currency_id)

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert to generic ``InstrumentValue``.

        The value is normalized to one unit of currency (``value / nominal``);
        the moment is the rate date at midnight in ``tzinfo``.
        """
        return InstrumentValue(
            value=self.value / self.nominal,
            moment=datetime.datetime.combine(self.date, datetime.time.min, tzinfo=tzinfo))
@dataclasses.dataclass
class CurrencyInfo(InstrumentInfoProvider):
    """ Container for currency information.
    """
    currency_id: str
    name: str
    eng_name: str
    nominal: int
    parent_code: str

    def __init__(self, *, currency_id: str, name: str, eng_name: str, nominal: int, parent_code: str):
        """ Initialize currency info with type coercion of every field. """
        self.currency_id = str(currency_id)
        self.name = str(name)
        self.eng_name = str(eng_name)
        self.nominal = int(nominal)
        self.parent_code = str(parent_code)

    def __str__(self):
        field_reprs = (
            f"currency_id={self.currency_id}",
            f"name={self.name}",
            f"eng_name={self.eng_name}",
            f"nominal={self.nominal}",
            f"parent_code={self.parent_code}",
        )
        return "CBR currency (" + ", ".join(field_reprs) + ")"

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic instrument info (code and name) for this currency. """
        return InstrumentInfo(code=self.currency_id, name=self.name)
class CbrCurrenciesInfoDownloadParameters(typing.NamedTuple):
    """ Container for CbrStringDataDownloader.download_instruments_info_string parameters.
    """
    rate_frequency: RateFrequencies

    @classmethod
    def safe_create(
            cls: typing.Type['CbrCurrenciesInfoDownloadParameters'],
            *,
            rate_frequency: RateFrequencies) -> 'CbrCurrenciesInfoDownloadParameters':
        """ Create new instance of ``CbrCurrenciesInfoDownloadParameters`` with arguments check.

        :param rate_frequency: ``RateFrequencies`` value.
        :return: ``CbrCurrenciesInfoDownloadParameters`` instance.
        :raises TypeError: If ``rate_frequency`` is not a ``RateFrequencies`` member.
        """
        if not isinstance(rate_frequency, RateFrequencies):
            raise TypeError(f"'rate_frequency' is not RateFrequencies: {rate_frequency!r}")
        # pylint: disable=no-value-for-parameter
        # (see https://github.com/PyCQA/pylint/issues/1801 for disable hint details)
        frequency = RateFrequencies(rate_frequency)
        return cls(rate_frequency=frequency)
@dataclasses.dataclass
class CbrCurrencyHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``CbrStringDataDownloader.download_instrument_history_string`` parameters.
    """
    currency_id: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[CbrCurrenciesInfoDownloadParameters],
            instrument_info: typing.Optional[CurrencyInfo]) -> 'CbrCurrencyHistoryDownloadParameters':
        """ Clone this instance, replacing attributes taken from instrument info. """
        return CbrCurrencyHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['CbrCurrencyHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['CbrCurrencyHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[CbrCurrenciesInfoDownloadParameters],
            instrument_info: typing.Optional[CurrencyInfo]) -> 'CbrCurrencyHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: New parameters instance combining attributes of the arguments.
        """
        # instrument info (when present) has priority over old history parameters
        if instrument_info is not None:
            currency_id = instrument_info.currency_id
        elif history_download_parameters is not None:
            currency_id = history_download_parameters.currency_id
        else:
            currency_id = None
        return cls(currency_id=currency_id)

    @classmethod
    def safe_create(
            cls: typing.Type['CbrCurrencyHistoryDownloadParameters'],
            *,
            currency_id: str) -> 'CbrCurrencyHistoryDownloadParameters':
        """ Create new instance of ``CbrCurrencyHistoryDownloadParameters`` with arguments check.

        :param currency_id: Currency ID value.
        :return: ``CbrCurrencyHistoryDownloadParameters`` instance.
        """
        return cls(currency_id=str(currency_id))
class CbrDownloadParametersFactory(DownloadParametersFactory):
    """ Download parameters factories and generators for Central Bank of Russia.
    """

    @property
    def download_info_parameters_class(self):
        """ Class of info download parameters. """
        return CbrCurrenciesInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Factory (with arguments check) for info download parameters. """
        return CbrCurrenciesInfoDownloadParameters.safe_create

    @property
    def download_history_parameters_class(self) -> typing.Type[CbrCurrencyHistoryDownloadParameters]:
        """ Class of history download parameters. """
        return CbrCurrencyHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., CbrCurrencyHistoryDownloadParameters]:
        """ Factory (with arguments check) for history download parameters. """
        return CbrCurrencyHistoryDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[CbrCurrencyHistoryDownloadParameters],
            info_download_parameters: typing.Optional[CbrCurrenciesInfoDownloadParameters],
            instrument_info: typing.Optional[CurrencyInfo]) -> CbrCurrencyHistoryDownloadParameters:
        """ Build new history download parameters from whichever pieces are available. """
        generate = CbrCurrencyHistoryDownloadParameters.generate_from
        return generate(history_download_parameters, info_download_parameters, instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/cbr/v2016/meta.py
| 0.784526 | 0.186484 |
meta.py
|
pypi
|
import datetime
import inspect
import logging
import typing
import urllib.parse
from .meta import (
Timeframes, Intervals,
BloombergInfoDownloadParameters, BloombergDownloadParametersFactory,
BloombergHistoryDownloadParameters)
from .parsers import BloombergInfoJsonParser, BloombergHistoryJsonParser
from ...base import (
ApiActualityChecker, InstrumentStringDataDownloader, ParseError,
InstrumentExporterFactory, InstrumentHistoryValuesExporter, InstrumentsInfoExporter,
DownloadParameterValuesStorage, DownloadStringResult,
CheckApiActualityError)
from ...generic import GenericInstrumentHistoryValuesExporter, GenericInstrumentsInfoExporter
from ....communication.downloader import Downloader
logging.getLogger().addHandler(logging.NullHandler())
class BloombergStringDataDownloader(InstrumentStringDataDownloader):
    """ Data downloader from www.bloomberg.com
    """
    # %s placeholder receives the URL-quoted ticker; only the PX_LAST (last price)
    # field is requested from the history endpoint
    _history_url_pattern = 'https://www.bloomberg.com/markets2/api/history/%s/PX_LAST'
    info_url = 'https://search.bloomberg.com/lookup.json'

    def __init__(self, downloader: Downloader):
        """ Initialize downloader wrapper.

        :param downloader: Underlying string downloader (handles HTTP, caching, etc.).
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.downloader = downloader
        # headers for HTTP
        # NOTE(review): browser-like User-Agent — presumably needed so the site
        # does not reject non-browser clients; confirm before changing.
        self.headers: typing.Dict[str, str] = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
        }
        # instrument types requested from the info (search) endpoint
        self.search_types = 'Company_Public,Index,Fund,Currency,Commodity,Bond'

    def download_instrument_history_string(
            self,
            parameters: BloombergHistoryDownloadParameters,
            moment_from: datetime.datetime,
            moment_to: datetime.datetime) -> DownloadStringResult:
        """ Download instrument history as a string.

        ``moment_from``/``moment_to`` are ignored here: the Bloomberg API selects
        the period via ``parameters.timeframe`` and ``parameters.interval`` instead.
        """
        return self.download_history_string(parameters.ticker, parameters.timeframe, parameters.interval)

    def download_instruments_info_string(
            self,
            parameters: BloombergInfoDownloadParameters) -> DownloadStringResult:
        """ Download instruments info as a string, using the search string from ``parameters``. """
        return self.download_instruments_info_string_impl(parameters) if False else self.download_info_string(search_string=parameters.search_string)

    def download_history_string(
            self,
            ticker: str,
            timeframe: Timeframes,
            interval: Intervals) -> DownloadStringResult:
        """ Downloads history data for one instrument as string.

        :param ticker: Ticker.
        :param timeframe: Timeframe to load.
        :param interval: Interval type to load.
        :return: Container with downloaded string.
        """
        params = [
            ('timeframe', str(timeframe.value)),
            ('period', str(interval.value))
        ]
        # query-string parameters and headers are handed to the downloader via attributes
        self.downloader.parameters = params
        self.downloader.headers = self.headers
        url = self._history_url_pattern % urllib.parse.quote(str(ticker))
        return self.downloader.download_string(url)

    def download_info_string(
            self,
            search_string: str) -> DownloadStringResult:
        """ Downloads the list of all available instruments by specified parameters.

        :param search_string: Search string
        :return: Container with downloaded string.
        """
        self.downloader.headers = self.headers
        self.downloader.parameters = [
            ('query', str(search_string)),
            ('types', self.search_types)
        ]
        return self.downloader.download_string(self.info_url)
class BloombergDownloadParameterValuesStorage(DownloadParameterValuesStorage):
    """ Storage of instruments download parameters.

    There are no dynamic enum types here; the storage only serves choice
    lists for the static ``Timeframes`` and ``Intervals`` enums.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        # maps a parameter type onto the callable building its choices list
        self._special_handlers: typing.Dict[type, typing.Callable] = {
            Timeframes: self._get_timeframes_choices,
            Intervals: self._get_intervals_choices
        }

    def is_dynamic_enum_type(self, cls: type) -> bool:
        """ Always ``False``: no dynamic enum types are managed. """
        return False

    def get_all_managed_types(self) -> typing.Iterable[typing.Type]:
        """ No managed (dynamic) types. """
        return ()

    def get_dynamic_enum_key(self, instance):
        """ No dynamic enums, hence no keys. """
        return None

    def get_parameter_type_choices(self, cls: type) \
            -> typing.Optional[
                typing.List[typing.Tuple[typing.Any, typing.Union[str, typing.List[typing.Tuple[typing.Any, str]]]]]
            ]:
        """ Choices list for ``cls``, or ``None`` when ``cls`` is not handled. """
        if not inspect.isclass(cls):
            return None
        handler = self._special_handlers.get(cls)
        return None if handler is None else handler()

    @staticmethod
    def _get_timeframes_choices():
        """ (value, description) pairs for all timeframes. """
        choices = []
        for timeframe in Timeframes:
            choices.append((timeframe.value, timeframe.description))
        return choices

    @staticmethod
    def _get_intervals_choices():
        """ (value, value) pairs for all intervals (they carry no descriptions). """
        choices = []
        for interval in Intervals:
            choices.append((interval.value, interval.value))
        return choices
class BloombergApiActualityChecker(ApiActualityChecker):
    """ Verifies actuality and accessibility of REST API of www.bloomberg.com
    """
    # probe values used to exercise both endpoints:
    search_string_to_check = 'DJ'  # Suppose Dow Jones always exists
    ticker_to_check = 'I28893:IND'  # Some instrument with available history
    timeframe_to_check = Timeframes.FIVE_YEARS
    interval_to_check = Intervals.DAILY

    def __init__(
            self,
            string_data_downloader: BloombergStringDataDownloader,
            info_parser: BloombergInfoJsonParser,
            history_values_parser: BloombergHistoryJsonParser):
        """ Initialize checker.

        :param string_data_downloader: Downloader of raw JSON strings.
        :param info_parser: Parser for instruments info JSON.
        :param history_values_parser: Parser for history values JSON.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.string_data_downloader = string_data_downloader
        self.history_values_parser = history_values_parser
        self.info_parser = info_parser

    def check(self):
        """ Probe the info (search) endpoint and then the history endpoint.

        :raises CheckApiActualityError: If a response cannot be parsed or the
            probe instrument's history is empty.
        """
        self.logger.info("Check actuality via instruments list")
        info_string_result = self.string_data_downloader.download_info_string(
            search_string=self.search_string_to_check)
        self.logger.debug(f"Got JSON data:\n{info_string_result.downloaded_string}")
        # read all available instruments
        try:
            _ = tuple(self.info_parser.parse(info_string_result.downloaded_string))
        except ParseError as ex:
            # mark the downloaded string as incorrect so the downloader can react
            # (e.g. drop it from cache)
            info_string_result.set_correctness(False)
            raise CheckApiActualityError(f"Unexpected indexes info JSON: {ex.message}") from ex
        except Exception:
            info_string_result.set_correctness(False)
            raise
        # now test history data
        self.logger.info(f"Check actuality via ticker {self.ticker_to_check!r}")
        history_data_string_result = self.string_data_downloader.download_history_string(
            ticker=self.ticker_to_check,
            timeframe=self.timeframe_to_check,
            interval=self.interval_to_check)
        self.logger.debug(f"Got JSON data:\n{history_data_string_result.downloaded_string}")
        try:
            history_data = tuple(self.history_values_parser.parse(
                history_data_string_result.downloaded_string,
                tzinfo=None))
        except ParseError as ex:
            history_data_string_result.set_correctness(False)
            raise CheckApiActualityError(f"Unexpected indexes history JSON: {ex.message}") from ex
        except Exception:
            history_data_string_result.set_correctness(False)
            raise
        if not history_data:
            # empty history for a known-good instrument means the API misbehaves
            history_data_string_result.set_correctness(False)
            raise CheckApiActualityError(
                f"Not found history values for {self.ticker_to_check!r}")
        self.logger.info("Actuality check was successful")
class BloombergExporterFactory(InstrumentExporterFactory):
    """ Factory class for create instances of Bloomberg data exporter.
    """
    name: str = 'Bloomberg. Version 2021'
    provider_site: str = 'https://www.bloomberg.com/'
    api_url: str = 'https://www.bloomberg.com/'

    def __init__(self):
        # lazily created singletons, see the properties below
        self._dynamic_enum_type_manager = None
        self._download_parameters_factory = None

    def create_history_values_exporter(self, downloader: Downloader) -> InstrumentHistoryValuesExporter:
        """ Create history values exporter bound to ``downloader``. """
        return GenericInstrumentHistoryValuesExporter(
            BloombergStringDataDownloader(downloader),
            BloombergHistoryJsonParser())

    def create_info_exporter(self, downloader: Downloader) -> InstrumentsInfoExporter:
        """ Create instruments info exporter bound to ``downloader``. """
        return GenericInstrumentsInfoExporter(
            BloombergStringDataDownloader(downloader),
            BloombergInfoJsonParser())

    def create_download_parameter_values_storage(
            self, downloader: Downloader) -> BloombergDownloadParameterValuesStorage:
        """ Create parameter values storage (does not use ``downloader``). """
        return BloombergDownloadParameterValuesStorage()

    def create_api_actuality_checker(self, downloader: Downloader) -> BloombergApiActualityChecker:
        """ Create API actuality checker bound to ``downloader``. """
        data_downloader = BloombergStringDataDownloader(downloader)
        return BloombergApiActualityChecker(
            data_downloader,
            BloombergInfoJsonParser(),
            BloombergHistoryJsonParser())

    @property
    def dynamic_enum_type_manager(self) -> BloombergDownloadParameterValuesStorage:
        """ Lazily created dynamic enum type manager (shared instance). """
        if self._dynamic_enum_type_manager is None:
            self._dynamic_enum_type_manager = BloombergDownloadParameterValuesStorage()
        return self._dynamic_enum_type_manager

    @property
    def download_parameters_factory(self) -> BloombergDownloadParametersFactory:
        """ Lazily created download parameters factory (shared instance). """
        if self._download_parameters_factory is None:
            self._download_parameters_factory = BloombergDownloadParametersFactory()
        return self._download_parameters_factory
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/bloomberg/v2021/exporters.py
| 0.688364 | 0.156749 |
exporters.py
|
pypi
|
import dataclasses
import datetime
import decimal
import enum
import typing
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class HistoryFieldNames(enum.Enum):
    """ Field names in history JSON.

    Values are the literal key names used by the Bloomberg history endpoint.
    """
    TICKER = 'ticker'
    PRICE = 'price'
    DATE_TIME = 'dateTime'
    VALUE = 'value'
class InfoFieldNames(enum.Enum):
    """ Field names in info JSON.

    Values are the literal key names used by the Bloomberg search endpoint.
    """
    RESULTS = 'results'
    TICKER_SYMBOL = 'ticker_symbol'
    NAME = 'name'
    COUNTRY = 'country'
    RESOURCE_TYPE = 'resource_type'
    RESOURCE_ID = 'resource_id'
    SECURITY_TYPE = 'security_type'
    URL = 'url'
class Timeframes(enum.Enum):
    """ Timeframes of history data

    Each member carries the API's string code as its value
    plus a human-readable ``description`` attribute.
    """

    def __new__(cls, value: str, description: str):
        member = object.__new__(cls)
        member._value_ = value
        member.description = str(description)
        return member

    def __repr__(self):
        cls_name = self.__class__.__name__
        return (f"<{cls_name}.{self.name}: "
                f"'{self.value}' ('{self.description}')>")

    # (API code, human-readable description)
    ONE_DAY = ('1_DAY', 'One day')
    ONE_WEEK = ('1_WEEK', 'One week')
    ONE_MONTH = ('1_MONTH', 'One month')
    SIX_MONTHS = ('6_MONTH', 'Six months')
    YEAR_TO_DATE = ('YTD', 'Year-to-date')
    ONE_YEAR = ('1_YEAR', 'One year')
    FIVE_YEARS = ('5_YEAR', 'Five years')
class Intervals(enum.Enum):
    """ Intervals of history data

    Values are the literal codes the Bloomberg API expects
    in the ``period`` request parameter.
    """
    DAILY = 'daily'
    WEEKLY = 'weekly'
    MONTHLY = 'monthly'
@dataclasses.dataclass
class InstrumentPrice(InstrumentValueProvider):
    """ Container for instrument history value.
    """
    ticker: str  # instrument ticker
    price_date: datetime.date  # date of the price
    price_value: decimal.Decimal  # price (PX_LAST) value

    def __init__(self,
                 *,
                 ticker: str,
                 price_date: datetime.date,
                 price_value: decimal.Decimal):
        """ Initialize price item with type coercion of all fields.

        :param ticker: Instrument ticker.
        :param price_date: Date of the price.
        :param price_value: Price value.
        :raises TypeError: If ``price_date`` is not a ``datetime.date``.
        """
        if not isinstance(price_date, datetime.date):
            raise TypeError("'price_date' is not date")
        # coerce to str for consistency with the other containers in this module
        # (e.g. BloombergInstrumentInfo coerces all its string fields)
        self.ticker = str(ticker)
        self.price_date = price_date
        self.price_value = decimal.Decimal(price_value)

    def __str__(self):
        return (f"Bloomberg price ("
                f"ticker={self.ticker}, "
                f"price_date={self.price_date.isoformat()}, "
                f"price_value={self.price_value})")

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert to generic ``InstrumentValue`` at midnight of ``price_date`` in ``tzinfo``. """
        moment = datetime.datetime.combine(self.price_date, datetime.time.min, tzinfo)
        return InstrumentValue(value=self.price_value, moment=moment)
@dataclasses.dataclass
class BloombergInstrumentInfo(InstrumentInfoProvider):
    """ Container for instrument information.
    """
    ticker_symbol: str
    name: str
    country: typing.Optional[str]
    resource_type: typing.Optional[str]
    resource_id: typing.Optional[str]
    security_type: typing.Optional[str]
    url: typing.Optional[str]

    def __init__(
            self,
            *,
            ticker_symbol: str,
            name: str,
            country: typing.Optional[str],
            resource_type: typing.Optional[str],
            resource_id: typing.Optional[str],
            security_type: typing.Optional[str],
            url: typing.Optional[str]):
        """ Initialize instrument info; optional fields keep ``None`` as-is,
        any other value is coerced to ``str``. """
        self.ticker_symbol = str(ticker_symbol)
        self.name = str(name)
        self.country = str(country) if country is not None else None
        self.resource_type = str(resource_type) if resource_type is not None else None
        self.resource_id = str(resource_id) if resource_id is not None else None
        self.security_type = str(security_type) if security_type is not None else None
        self.url = str(url) if url is not None else None

    def __str__(self):
        field_reprs = (
            f"ticker_symbol={self.ticker_symbol}",
            f"name={self.name}",
            f"country={self.country}",
            f"resource_type={self.resource_type}",
            f"resource_id={self.resource_id}",
            f"security_type={self.security_type}",
            f"url={self.url}",
        )
        return "Bloomberg instrument(" + ", ".join(field_reprs) + ")"

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic instrument info (code and name). """
        return InstrumentInfo(code=self.ticker_symbol, name=self.name)
class BloombergInfoDownloadParameters(typing.NamedTuple):
    """ Container for ``BloombergStringDataDownloader.download_instruments_info_string parameters``.
    """
    search_string: str

    @classmethod
    def safe_create(
            cls: typing.Type['BloombergInfoDownloadParameters'],
            *,
            search_string: str) -> 'BloombergInfoDownloadParameters':
        """ Create new instance of ``BloombergInfoDownloadParameters`` with arguments check.

        :param search_string: Search string.
        :return: ``BloombergInfoDownloadParameters`` instance.
        """
        search_string = str(search_string)
        return cls(search_string=search_string)
@dataclasses.dataclass
class BloombergHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``BloombergStringDataDownloader.download_instrument_history_string parameters``.
    """
    ticker: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]
    timeframe: Timeframes
    interval: Intervals

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[BloombergInfoDownloadParameters],
            instrument_info: typing.Optional[BloombergInstrumentInfo]) -> 'BloombergHistoryDownloadParameters':
        """ Clone this instance, replacing attributes taken from instrument info. """
        return BloombergHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['BloombergHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['BloombergHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[BloombergInfoDownloadParameters],
            instrument_info: typing.Optional[BloombergInstrumentInfo]) -> 'BloombergHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: New parameters instance combining attributes of the arguments.
        """
        # instrument info (when present) has priority for the ticker;
        # timeframe/interval can only come from the old history parameters
        if instrument_info is not None:
            ticker = instrument_info.ticker_symbol
        elif history_download_parameters is not None:
            ticker = history_download_parameters.ticker
        else:
            ticker = None
        if history_download_parameters is None:
            timeframe = interval = None
        else:
            timeframe = history_download_parameters.timeframe
            interval = history_download_parameters.interval
        return cls(ticker=ticker, timeframe=timeframe, interval=interval)

    @classmethod
    def safe_create(
            cls: typing.Type['BloombergHistoryDownloadParameters'],
            *,
            ticker: str,
            timeframe: Timeframes,
            interval: Intervals) -> 'BloombergHistoryDownloadParameters':
        """ Create new instance of ``BloombergHistoryDownloadParameters`` with arguments check.

        :param ticker: Instrument ticker.
        :param timeframe: Timeframe for download.
        :param interval: Interval type.
        :return: ``BloombergHistoryDownloadParameters`` instance.
        :raises TypeError: If ``timeframe`` or ``interval`` has a wrong type.
        """
        if not isinstance(timeframe, Timeframes):
            raise TypeError(f"'timeframe' is not Timeframes: {timeframe!r}")
        if not isinstance(interval, Intervals):
            raise TypeError(f"'interval' is not Intervals: {interval!r}")
        return cls(
            ticker=str(ticker),
            timeframe=timeframe,
            interval=interval)
class BloombergDownloadParametersFactory(DownloadParametersFactory):
    """ Download parameters factories and generators for Bloomberg.
    """

    @property
    def download_info_parameters_class(self):
        """ Class of info download parameters. """
        return BloombergInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Factory (with arguments check) for info download parameters. """
        return BloombergInfoDownloadParameters.safe_create

    @property
    def download_history_parameters_class(self) -> typing.Type[BloombergHistoryDownloadParameters]:
        """ Class of history download parameters. """
        return BloombergHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., BloombergHistoryDownloadParameters]:
        """ Factory (with arguments check) for history download parameters. """
        return BloombergHistoryDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[BloombergHistoryDownloadParameters],
            info_download_parameters: typing.Optional[BloombergInfoDownloadParameters],
            instrument_info: typing.Optional[BloombergInstrumentInfo]) -> BloombergHistoryDownloadParameters:
        """ Build new history download parameters from whichever pieces are available. """
        generate = BloombergHistoryDownloadParameters.generate_from
        return generate(history_download_parameters, info_download_parameters, instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/bloomberg/v2021/meta.py
| 0.69946 | 0.153899 |
meta.py
|
pypi
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Utilities for parse data from app2.msci.com
"""
import decimal
import json
import logging
import typing
import datetime
from .meta import (
IndexValue, IndexInfo, Market, Size, Style, IndexSuite, IndexSuiteGroup, Frequency,
Scopes, IndexLevel, Currency, IndexPanelData)
from ...base import (
InstrumentValuesHistoryParser, InstrumentInfoParser, ParseError, SourceDownloadError,
DownloadParameterValuesStorage, InstrumentValuesHistoryEmpty)
logging.getLogger().addHandler(logging.NullHandler())
class MsciHistoryJsonParser(InstrumentValuesHistoryParser):
    """ Parser for history data of index from JSON string.

    Example URL:
    https://app2.msci.com/products/service/index/indexmaster/getLevelDataForGraph?currency_symbol=USD&index_variant=STRD&start_date=20170813&end_date=20210813&data_frequency=DAILY&index_codes=990300

    Expected JSON shape::

        {
         "msci_index_code":"990300",
         "index_variant_type":"STRD",
         "ISO_currency_symbol":"USD",
         "indexes":{
           "INDEX_LEVELS":[
             {"level_eod":1892.970699,"calc_date":20170609},
             {"level_eod":1886.335805,"calc_date":20170612},
             ...
           ]
         }
        }

    On error the endpoint may instead return a dictionary with
    'error_code'/'error_message' fields, e.g.::

        {
         "timestamp":"Aug 14, 2021 4:20:12 PM",
         "user":"sys_x_bmapip01",
         "error_code":" 100",
         "error_message":" null Invalid Parameter end_date : '19691231. Calculation date cannot be earlier than 19970101'"
        }
    """
    # dates in JSON are integers like 20170609
    date_format = '%Y%m%d'

    def __init__(self, parameter_values_storage: DownloadParameterValuesStorage):
        """ Initialize parser.

        :param parameter_values_storage: Storage used to resolve ``IndexLevel``
            and ``Currency`` dynamic enum values by their keys.
        """
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        self.parameter_values_storage = parameter_values_storage

    def parse(  # pylint: disable=arguments-renamed
            self,
            raw_json_text: str,
            tzinfo: typing.Optional[datetime.timezone]
    ) -> typing.Iterable[IndexValue]:
        """ Parse index values history from JSON string.

        :param raw_json_text: Downloaded JSON string.
        :param tzinfo: Accepted for interface compatibility; not used in this
            implementation (only plain dates are produced).
        :return: Generator of ``IndexValue`` items.
        :raises ParseError: If JSON is malformed or required fields are missing.
        :raises SourceDownloadError: If JSON carries an error message from the source.
        :raises InstrumentValuesHistoryEmpty: If history contains no values.
        """
        try:
            raw_data = json.loads(raw_json_text)
        except json.decoder.JSONDecodeError as ex:
            raise ParseError(ex.msg) from ex
        if not isinstance(raw_data, dict):
            # can be Inf etc., but we accept only {}
            raise ParseError("Wrong JSON format. Top level is not dictionary.")
        # an 'error_message' field means the source reported a request error
        error_message = raw_data.get('error_message', None)
        if error_message is not None:
            raise SourceDownloadError(f"JSON contains error message: {error_message}")
        msci_index_code = raw_data.get('msci_index_code', None)
        if msci_index_code is None:
            raise ParseError("Wrong JSON format. 'msci_index_code' not found.")
        index_variant_type = raw_data.get('index_variant_type', None)
        if index_variant_type is None:
            raise ParseError("Wrong JSON format. 'index_variant_type' not found.")
        iso_currency_symbol = raw_data.get('ISO_currency_symbol', None)
        if iso_currency_symbol is None:
            raise ParseError("Wrong JSON format. 'ISO_currency_symbol' not found.")
        indexes_block = raw_data.get('indexes', None)
        if indexes_block is None:
            raise ParseError("Wrong JSON format. 'indexes' block not found.")
        if not isinstance(indexes_block, dict):
            raise ParseError("Wrong JSON format. 'indexes' block is not dictionary.")
        index_levels_block = indexes_block.get('INDEX_LEVELS', None)
        if index_levels_block is None:
            raise ParseError("Wrong JSON format. 'INDEX_LEVELS' block not found.")
        if not index_levels_block:
            raise InstrumentValuesHistoryEmpty()
        # validate and convert everything eagerly so errors are raised here,
        # then return a lazy generator over the validated pairs
        index_values_pairs: typing.List[typing.Tuple[decimal.Decimal, datetime.date]] = []
        for index_value_block in index_levels_block:
            if not isinstance(index_value_block, dict):
                raise ParseError("Wrong JSON format. Item inside 'INDEX_LEVELS' block is not dictionary.")
            level_eod = index_value_block.get('level_eod', None)
            if level_eod is None:
                raise ParseError("Wrong JSON format. 'level_eod' not found.")
            calc_date = index_value_block.get('calc_date', None)
            if calc_date is None:
                raise ParseError("Wrong JSON format. 'calc_date' not found.")
            if isinstance(level_eod, float):
                # hack to adjust floating point digits:
                # repr() gives the shortest round-trip representation, so the
                # Decimal gets the decimal digits, not the binary float noise
                level_eod = repr(level_eod)
            try:
                level_eod = decimal.Decimal(level_eod)
            except (ValueError, TypeError, decimal.DecimalException) as ex:
                raise ParseError(f"Wrong JSON format. Can't convert {level_eod} to decimal.") from ex
            try:
                calc_date = datetime.datetime.strptime(str(calc_date), self.date_format)
            except (ValueError, TypeError) as ex:
                raise ParseError(f"Wrong JSON format. Can't convert {calc_date} to datetime.") from ex
            index_values_pairs.append((level_eod, calc_date.date()))
        # resolve dynamic enum values declared by the source
        index_level = self.parameter_values_storage.get_dynamic_enum_value_by_key(IndexLevel, index_variant_type)
        if index_level is None:
            raise ParseError(f"Index level {index_variant_type!r} not found")
        currency = self.parameter_values_storage.get_dynamic_enum_value_by_key(Currency, iso_currency_symbol)
        if currency is None:
            raise ParseError(f"Currency {iso_currency_symbol!r} not found")
        return (
            IndexValue(calc_date=calc_date,
                       level_eod=level_eod,
                       msci_index_code=msci_index_code,
                       index_variant_type=index_level,
                       currency=currency)
            for level_eod, calc_date
            in index_values_pairs)
class MsciIndexInfoParser(InstrumentInfoParser):
    """ Parser for indexes info list from JSON.

    Example URL:
    https://app2.msci.com/products/service/index/indexmaster/searchIndexes?index_market=24576&index_scope=Region&index_size=12&index_style=None&index_suite=C

    Expected JSON shape::

        {
         "indexes":[
           {"msci_index_code":903600,"index_name":"AUSTRALIA"},
           {"msci_index_code":904000,"index_name":"AUSTRIA"},
           ...
         ]
        }
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)

    def parse(self, raw_json_text: str) -> typing.Iterable[IndexInfo]:  # pylint: disable=arguments-renamed
        """ Parse indexes info from JSON string.

        :param raw_json_text: Downloaded JSON string.
        :return: Generator of ``IndexInfo`` items.
        :raises ParseError: If JSON is malformed or required fields are missing.
        """
        try:
            raw_data = json.loads(raw_json_text)
        except json.decoder.JSONDecodeError as ex:
            raise ParseError(ex.msg) from ex
        if not isinstance(raw_data, dict):
            # can be Inf etc., but we accept only {}
            raise ParseError("Wrong JSON format. Top level is not dictionary.")
        indexes_block = raw_data.get('indexes', None)
        if indexes_block is None:
            raise ParseError("Wrong JSON format. 'indexes' block not found.")
        # validate eagerly so errors are raised here,
        # then return a lazy generator over the validated pairs
        index_info_pairs: typing.List[typing.Tuple[str, str]] = []
        for index_info_block in indexes_block:
            if not isinstance(index_info_block, dict):
                raise ParseError("Wrong JSON format. Item inside 'indexes' block is not dictionary.")
            msci_index_code = index_info_block.get('msci_index_code', None)
            if msci_index_code is None:
                raise ParseError("Wrong JSON format. 'msci_index_code' not found.")
            index_name = index_info_block.get('index_name', None)
            if index_name is None:
                raise ParseError("Wrong JSON format. 'index_name' not found.")
            index_info_pairs.append((msci_index_code, index_name))
        return (
            IndexInfo(msci_index_code=msci_index_code,
                      index_name=index_name)
            for msci_index_code, index_name
            in index_info_pairs)
class MsciIndexPanelDataJsonParser:
    """ Parser for index panel data from JSON.
    """
    # id of the 'All Country (DM+EM)' market; it is available only in regional scope
    all_country_market_id = '24576'
    # frequency ids hard-coded in site scripts (see the note near the end of ``parse``)
    daily_frequency_id = 'DAILY'
    monthly_frequency_id = 'END_OF_MONTH'

    def __init__(self):
        self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
        # NOTE(review): apparently unused — ``parse`` builds its own local list
        # of index suite groups; confirm before removing.
        self._index_suite_groups = {}

    @staticmethod
    def _parse_block(
            block_name: str,
            src_block: typing.Dict[str, typing.List]) -> typing.Iterable[typing.Tuple[str, str]]:
        """ Yield (id, name) pairs from the list stored under ``block_name`` in ``src_block``.

        :param block_name: Key of the list of {'id':..., 'name':...} dictionaries.
        :param src_block: Parent dictionary.
        :return: Generator of (identity, name) tuples.
        :raises ParseError: If the block is absent or any item is malformed.
        """
        target_block = src_block.get(block_name, None)
        if target_block is None:
            raise ParseError(f"Wrong JSON format. {block_name!r} block not found.")
        for pair_block in target_block:
            if not isinstance(pair_block, dict):
                raise ParseError(f"Wrong JSON format. Item inside {block_name!r} block is not dictionary.")
            identity = pair_block.get('id', None)
            if identity is None:
                raise ParseError("Wrong JSON format. 'id' not found.")
            name = pair_block.get('name', None)
            if name is None:
                raise ParseError("Wrong JSON format. 'name' not found.")
            yield identity, name

    def parse(self, raw_json_text: str) -> IndexPanelData:
        """ Parse index panel data from JSON and return it.

        :param raw_json_text: JSON string with index panel data.
        :return: ``IndexPanelData`` instance.
        :raises ParseError: If JSON is malformed or expected blocks are missing.
        """
        try:
            raw_data = json.loads(raw_json_text)
        except json.decoder.JSONDecodeError as ex:
            raise ParseError(ex.msg) from ex
        if not isinstance(raw_data, dict):
            # can be Inf etc., but we accept only {}
            raise ParseError("Wrong JSON format. Top level is not dictionary.")
        markets = tuple(
            Market.safe_create(
                identity=identity,
                name=name,
                # 'All Country (DM+EM)' market available only in regional scope
                scope=Scopes.REGIONAL if str(identity) == self.all_country_market_id else None)
            for identity, name
            in self._parse_block('market', raw_data))
        currencies = tuple(
            Currency.safe_create(identity=identity, name=name)
            for identity, name
            in self._parse_block('currency', raw_data))
        index_levels = tuple(
            IndexLevel.safe_create(identity=identity, name=name)
            for identity, name
            in self._parse_block('indexLevels', raw_data))
        frequencies = tuple(
            Frequency.safe_create(identity=identity, name=name)
            for identity, name
            in self._parse_block('frequency', raw_data))
        sizes = tuple(
            Size.safe_create(identity=identity, name=name)
            for identity, name
            in self._parse_block('size', raw_data))
        styles = tuple(
            Style.safe_create(identity=identity, name=name)
            for identity, name
            in self._parse_block('style', raw_data))
        # 'indexSuite' mixes plain items with 'optgroup' items that contain
        # nested 'options'; the latter produce an IndexSuiteGroup plus its suites
        index_suites_block = raw_data.get('indexSuite', None)
        if index_suites_block is None:
            raise ParseError("Wrong JSON format. 'indexSuite' block not found.")
        index_suite_groups: typing.List[IndexSuiteGroup] = []
        index_suites: typing.List[IndexSuite] = []
        for index_suites_block_item in index_suites_block:
            optgroup = index_suites_block_item.get('optgroup', None)
            if optgroup is None:
                # ungrouped suite: wrap the single item in a synthetic one-key
                # dict so _parse_block can validate it
                index_suites.extend(
                    IndexSuite.safe_create(identity=identity, name=name)
                    for identity, name
                    in self._parse_block('DUMMY', {'DUMMY': [index_suites_block_item]}))
            else:
                group = IndexSuiteGroup.safe_create(name=optgroup)
                index_suite_groups.append(group)
                index_suites.extend(
                    IndexSuite.safe_create(identity=identity, name=name, group=group)
                    for identity, name
                    in self._parse_block('options', index_suites_block_item))
        # unfortunately, daily and monthly frequencies hard-coded in site scripts,
        # so we forced to assume their codes here
        frequencies_dict = {frequency.identity: frequency for frequency in frequencies}
        if self.daily_frequency_id not in frequencies_dict:
            raise ParseError(f"Frequency with id {self.daily_frequency_id!r} not found.")
        if self.monthly_frequency_id not in frequencies_dict:
            raise ParseError(f"Frequency with id {self.monthly_frequency_id!r} not found.")
        return IndexPanelData(
            markets=markets,
            currencies=currencies,
            index_levels=index_levels,
            frequencies=frequencies,
            index_suite_groups=tuple(index_suite_groups),
            index_suites=tuple(index_suites),
            sizes=sizes,
            styles=styles,
            daily_frequency=frequencies_dict[self.daily_frequency_id],
            monthly_frequency=frequencies_dict[self.monthly_frequency_id])
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/msci/v2021/parsers.py
| 0.469277 | 0.161849 |
parsers.py
|
pypi
|
import decimal
import typing
import datetime
import enum
import dataclasses
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class Scopes(enum.Enum):
    """ Index scope.

    Each member carries a pair: the enum value (presumably the string
    expected by msci.com requests — TODO confirm against downloader)
    and a human-readable description.
    """
    def __new__(cls, value: str, description: str):
        # First tuple item becomes the member value; the second is stored
        # as an extra ``description`` attribute on the member.
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}')>"
    REGIONAL = ('Region', 'Regional')
    COUNTRY = ('Country', 'Country')
class Market(typing.NamedTuple):
    """ Market from msci.com.
    """
    # Identity value (string key used to reference the market).
    identity: str
    # Human-readable market name.
    name: str
    # Scope the market belongs to; ``None`` means available for all scopes.
    scope: typing.Optional[Scopes] = None

    @classmethod
    def safe_create(
            cls: typing.Type['Market'],
            *,
            identity: str,
            name: str,
            scope: typing.Optional[Scopes] = None) -> 'Market':
        """ Create new instance of ``Market`` with arguments check.

        :param identity: Identity value.
        :param name: Name.
        :param scope: Scope. ``None`` if market is available for all scopes.
        :return: ``Market`` instance.
        :raises TypeError: If ``scope`` is neither ``None`` nor a ``Scopes`` member.
        """
        # Fixed implicit-Optional annotation: parameter was declared
        # ``scope: Scopes = None``, which PEP 484 disallows.
        if scope is not None and not isinstance(scope, Scopes):
            raise TypeError("'scope' is not Scopes")
        # see https://github.com/PyCQA/pylint/issues/1801 for pylint disable hint details
        return cls(
            identity=str(identity),
            name=str(name),
            scope=None if scope is None else Scopes(scope))  # pylint: disable=no-value-for-parameter
class Currency(typing.NamedTuple):
    """ Index currency.

    The MSCI Country and Regional Indices are calculated in local currency as well as in USD.
    The concept of a “local currency” calculation excludes the impact of currency fluctuations.
    """
    identity: str
    name: str

    @classmethod
    def safe_create(
            cls: typing.Type['Currency'],
            *,
            identity: str,
            name: str) -> 'Currency':
        """ Build a ``Currency``, coercing both arguments to ``str``.

        :param identity: Identity value.
        :param name: Currency name.
        :return: New ``Currency`` instance.
        """
        identity_text = str(identity)
        name_text = str(name)
        return cls(identity=identity_text, name=name_text)
class IndexLevel(typing.NamedTuple):
    """ Index level (or price level).

    Total return indices measure the market performance,
    including price performance and income from regular cash distributions
    (cash dividend payments or capital repayments).

    Gross Daily Total Return: This series approximates the maximum possible reinvestment
    of regular cash distributions (dividends or capital repayments).
    The amount reinvested is the cash distributed to individuals resident in the country of the company,
    but does not include tax credits.

    Net Daily Total Return: This series approximates the minimum possible reinvestment
    of regular cash distributions. Provided that the regular capital repayment is not subject to withholding tax,
    the reinvestment in the Net Daily Total Return is free of withholding tax.

    The Total Return Index that represents the weighted return of the MSCI parent index
    and the cash component.

    The Excess Return Index that represents the return of the Total Return Index
    minus the return of the cash component.
    """
    identity: str
    name: str

    @classmethod
    def safe_create(
            cls: typing.Type['IndexLevel'],
            *,
            identity: str,
            name: str) -> 'IndexLevel':
        """ Build an ``IndexLevel``, coercing both arguments to ``str``.

        :param identity: Identity value.
        :param name: Index level name.
        :return: New ``IndexLevel`` instance.
        """
        coerced = (str(identity), str(name))
        return cls(identity=coerced[0], name=coerced[1])
class Frequency(typing.NamedTuple):
    """ Index values frequency.
    """
    identity: str
    name: str

    @classmethod
    def safe_create(
            cls: typing.Type['Frequency'],
            *,
            identity: str,
            name: str) -> 'Frequency':
        """ Build a ``Frequency``, coercing both arguments to ``str``.

        :param identity: Identity value.
        :param name: Frequency name.
        :return: New ``Frequency`` instance.
        """
        identity_text = str(identity)
        return cls(identity=identity_text, name=str(name))
class Style(typing.NamedTuple):
    """ Index style.
    """
    identity: str
    name: str

    @classmethod
    def safe_create(
            cls: typing.Type['Style'],
            *,
            identity: str,
            name: str) -> 'Style':
        """ Build a ``Style``, coercing both arguments to ``str``.

        :param identity: Identity value.
        :param name: Style name.
        :return: New ``Style`` instance.
        """
        style_id, style_name = str(identity), str(name)
        return cls(identity=style_id, name=style_name)
class Size(typing.NamedTuple):
    """ Index size.
    """
    identity: str
    name: str

    @classmethod
    def safe_create(
            cls: typing.Type['Size'],
            *,
            identity: str,
            name: str) -> 'Size':
        """ Build a ``Size``, coercing both arguments to ``str``.

        :param identity: Identity value.
        :param name: Size name.
        :return: New ``Size`` instance.
        """
        size_id = str(identity)
        size_name = str(name)
        return cls(identity=size_id, name=size_name)
class IndexSuiteGroup(typing.NamedTuple):
    """ Group of index suites.
    """
    name: str

    @classmethod
    def safe_create(
            cls: typing.Type['IndexSuiteGroup'],
            *,
            name: str) -> 'IndexSuiteGroup':
        """ Build an ``IndexSuiteGroup``, coercing the name to ``str``.

        :param name: Index suite group name.
        :return: New ``IndexSuiteGroup`` instance.
        """
        group_name = str(name)
        return cls(name=group_name)
class IndexSuite(typing.NamedTuple):
    """ Index suite.
    """
    identity: str
    name: str
    group: typing.Optional[IndexSuiteGroup] = None

    @classmethod
    def safe_create(
            cls: typing.Type['IndexSuite'],
            *,
            identity: str,
            name: str,
            group: typing.Optional[IndexSuiteGroup] = None) -> 'IndexSuite':
        """ Build an ``IndexSuite`` after validating the ``group`` argument.

        :param identity: Identity value.
        :param name: Index suite name.
        :param group: Index suite group, or ``None`` when the suite belongs to no group.
        :return: New ``IndexSuite`` instance.
        :raises TypeError: If ``group`` is neither ``None`` nor an ``IndexSuiteGroup``.
        """
        if not (group is None or isinstance(group, IndexSuiteGroup)):
            raise TypeError("'group' is not IndexSuiteGroup")
        return cls(identity=str(identity), name=str(name), group=group)
class IndexPanelData(typing.NamedTuple):
    """ Container for index panel data from msci.com.

    Aggregates all dictionaries parsed from the index panel plus
    shortcuts to the two frequencies that the site scripts hard-code
    (daily and monthly); see the parser that constructs this object.
    """
    markets: typing.Tuple[Market, ...]  # available markets
    currencies: typing.Tuple[Currency, ...]  # available currencies
    index_levels: typing.Tuple[IndexLevel, ...]  # available index (price) levels
    frequencies: typing.Tuple[Frequency, ...]  # available value frequencies
    index_suite_groups: typing.Tuple[IndexSuiteGroup, ...]  # groups of index suites
    index_suites: typing.Tuple[IndexSuite, ...]  # available index suites
    sizes: typing.Tuple[Size, ...]  # available index sizes
    styles: typing.Tuple[Style, ...]  # available index styles
    daily_frequency: Frequency  # shortcut to the daily frequency (hard-coded id)
    monthly_frequency: Frequency  # shortcut to the monthly frequency (hard-coded id)
@dataclasses.dataclass
class IndexValue(InstrumentValueProvider):
    """ Container for index history item.

    Note: a custom ``__init__`` is kept under ``@dataclass`` on purpose:
    it validates and coerces arguments instead of the generated one.
    All arguments are keyword-only.
    """
    calc_date: datetime.date  # date of the value (end of day)
    level_eod: decimal.Decimal  # index level at end of day
    msci_index_code: str  # MSCI index code
    index_variant_type: IndexLevel  # index (price) level variant
    currency: Currency  # currency of the value
    def __init__(self,
                 *,
                 calc_date: datetime.date,
                 level_eod: decimal.Decimal,
                 msci_index_code: str,
                 index_variant_type: IndexLevel,
                 currency: Currency):
        # Validate before assigning anything so a failed construction
        # leaves no partially initialized instance in caller hands.
        if not isinstance(calc_date, datetime.date):
            raise TypeError("'calc_date' is not date")
        if not isinstance(index_variant_type, IndexLevel):
            raise TypeError("'index_variant_type' is not IndexLevel")
        if not isinstance(currency, Currency):
            raise TypeError("'currency' is not Currency")
        self.calc_date = calc_date
        self.level_eod = decimal.Decimal(level_eod)
        self.msci_index_code = str(msci_index_code)
        self.index_variant_type = index_variant_type
        self.currency = currency
    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert this history item to a generic ``InstrumentValue``.

        :param tzinfo: Optional timezone attached to the resulting moment.
        :return: ``InstrumentValue`` with the value at midnight of ``calc_date``.
        """
        return InstrumentValue(
            value=self.level_eod,
            moment=datetime.datetime.combine(self.calc_date, datetime.time.min, tzinfo=tzinfo))
@dataclasses.dataclass
class IndexInfo(InstrumentInfoProvider):
    """ Container for index information.
    """
    msci_index_code: str
    index_name: str

    def __init__(self, *, msci_index_code: str, index_name: str):
        # Keyword-only constructor; both attributes are stored as strings.
        self.msci_index_code = str(msci_index_code)
        self.index_name = str(index_name)

    def __str__(self):
        return "MSCI index (msci_index_code={0}, index_name={1})".format(
            self.msci_index_code, self.index_name)

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic instrument info built from this index info. """
        return InstrumentInfo(code=self.msci_index_code, name=self.index_name)
class MsciIndexesInfoDownloadParameters(typing.NamedTuple):
    """ Container for ``MsciStringDataDownloader.download_instruments_info_string`` parameters.
    """
    index_scope: Scopes  # index scope (regional/country)
    index_market: Market  # market
    index_size: Size  # index size
    index_style: Style  # index style
    index_suite: IndexSuite  # index suite
    @classmethod
    def safe_create(
            cls: typing.Type['MsciIndexesInfoDownloadParameters'],
            *,
            index_scope: Scopes,
            index_market: Market,
            index_size: Size,
            index_style: Style,
            index_suite: IndexSuite) -> 'MsciIndexesInfoDownloadParameters':
        """ Create new instance of ``MsciIndexesInfoDownloadParameters`` with arguments check.

        :param index_scope: Index scope.
        :param index_market: Index market.
        :param index_size: Index size.
        :param index_style: Index style.
        :param index_suite: Index suite.
        :return: ``MsciIndexesInfoDownloadParameters`` instance.
        :raises TypeError: If any argument has a wrong type.
        """
        # Strict isinstance checks: this tuple is sent to the remote API,
        # so silently accepting look-alike values is not desirable.
        if not isinstance(index_scope, Scopes):
            raise TypeError(f"'index_scope' is not Scopes: {index_scope!r}")
        if not isinstance(index_market, Market):
            raise TypeError(f"'index_market' is not Market: {index_market!r}")
        if not isinstance(index_size, Size):
            raise TypeError(f"'index_size' is not Size: {index_size!r}")
        if not isinstance(index_style, Style):
            raise TypeError(f"'index_style' is not Style: {index_style!r}")
        if not isinstance(index_suite, IndexSuite):
            raise TypeError(f"'index_suite' is not IndexSuite: {index_suite!r}")
        return cls(
            index_scope=index_scope,
            index_market=index_market,
            index_size=index_size,
            index_style=index_style,
            index_suite=index_suite)
@dataclasses.dataclass
class MsciIndexHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``MsciStringDataDownloader.download_instrument_history_string`` parameters.
    """
    index_code: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]  # index code (identity)
    currency: Currency  # currency of requested history
    index_variant: IndexLevel  # index (price) level variant
    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[MsciIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'MsciIndexHistoryDownloadParameters':
        """ Clone this instance, replacing attributes from ``instrument_info``.

        :param info_download_parameters: Optional info download parameters (unused here, see ``generate_from``).
        :param instrument_info: Optional instrument info to take the index code from.
        :return: Cloned parameters instance.
        """
        return MsciIndexHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)
    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['MsciIndexHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['MsciIndexHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[MsciIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'MsciIndexHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        NOTE(review): when all source arguments are ``None`` the produced instance
        has ``None`` attributes despite the declared types; presumably such partial
        instances are filled in later — verify against callers.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
        `info_download_parameters` and `instrument_info`.
        """
        # instrument_info wins over history_download_parameters for the index code.
        return cls(
            index_code=((None if history_download_parameters is None else history_download_parameters.index_code)
                        if instrument_info is None
                        else instrument_info.msci_index_code),
            currency=(None if history_download_parameters is None else history_download_parameters.currency),
            index_variant=(None if history_download_parameters is None else history_download_parameters.index_variant)
        )
    @classmethod
    def safe_create(
            cls: typing.Type['MsciIndexHistoryDownloadParameters'],
            *,
            index_code: str,
            currency: Currency,
            index_variant: IndexLevel) -> 'MsciIndexHistoryDownloadParameters':
        """ Create new instance of ``MsciIndexHistoryDownloadParameters`` with arguments check.

        :param index_code: Index code.
        :param currency: Currency.
        :param index_variant: Index level.
        :return: ``MsciIndexHistoryDownloadParameters`` instance.
        :raises TypeError: If ``currency`` or ``index_variant`` has a wrong type.
        """
        if not isinstance(currency, Currency):
            raise TypeError(f"'currency' is not Currency: {currency!r}")
        if not isinstance(index_variant, IndexLevel):
            raise TypeError(f"'index_variant' is not IndexLevel: {index_variant!r}")
        return cls(
            index_code=str(index_code),
            currency=currency,
            index_variant=index_variant)
class MsciDownloadParametersFactory(DownloadParametersFactory):
    """ Download parameters factories and generators for MSCI.
    """
    @property
    def download_history_parameters_class(self) -> typing.Type[MsciIndexHistoryDownloadParameters]:
        """ Class of history download parameters. """
        return MsciIndexHistoryDownloadParameters
    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., MsciIndexHistoryDownloadParameters]:
        """ Factory (with arguments check) for history download parameters. """
        return MsciIndexHistoryDownloadParameters.safe_create
    @property
    def download_info_parameters_class(self):
        """ Class of instruments info download parameters. """
        return MsciIndexesInfoDownloadParameters
    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Factory (with arguments check) for instruments info download parameters. """
        return MsciIndexesInfoDownloadParameters.safe_create
    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[MsciIndexHistoryDownloadParameters],
            info_download_parameters: typing.Optional[MsciIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> MsciIndexHistoryDownloadParameters:
        """ Delegate to ``MsciIndexHistoryDownloadParameters.generate_from``. """
        return MsciIndexHistoryDownloadParameters.generate_from(
            history_download_parameters,
            info_download_parameters,
            instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/msci/v2021/meta.py
| 0.8618 | 0.290248 |
meta.py
|
pypi
|
import decimal
import typing
import datetime
import enum
import dataclasses
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import Volatile, LEGACY_ANNOTATIONS, Description
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class Scopes(enum.Enum):
    """ Index scope.

    Each member carries a pair: the one-letter code (presumably the value
    expected by msci.com requests — TODO confirm against downloader)
    and a human-readable description.
    """
    def __new__(cls, value: str, description: str):
        # First tuple item becomes the member value; the second is stored
        # as an extra ``description`` attribute on the member.
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}')>"
    REGIONAL = ('R', 'Regional')
    COUNTRY = ('C', 'Country')
class Markets(enum.Enum):
    """ Market of index.

    Each member carries a numeric string id, a description, and the
    ``Scopes`` member it belongs to (stored as the ``scope`` attribute).
    """
    def __new__(cls, value: str, description: str, scope: Scopes):
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        # see https://github.com/PyCQA/pylint/issues/1801 for pylint disable hint details
        obj.scope = Scopes(scope)  # pylint: disable=no-value-for-parameter
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}', '{self.scope.description}')>"
    REGIONAL_ALL_COUNTRY = ('1896', 'All Country (DM+EM)', Scopes.REGIONAL)
    REGIONAL_CHINA_MARKETS = ('2809', 'China Markets', Scopes.REGIONAL)
    REGIONAL_DEVELOPED_MARKETS = ('1897', 'Developed Markets (DM)', Scopes.REGIONAL)
    REGIONAL_EMERGING_MARKETS = ('1898', 'Emerging Markets (EM)', Scopes.REGIONAL)
    REGIONAL_FRONTIER_MARKETS = ('2115', 'Frontier Markets (FM)', Scopes.REGIONAL)
    REGIONAL_GCC_AND_ARABIAN_MARKETS = ('1899', 'GCC and Arabian Markets', Scopes.REGIONAL)
    COUNTRY_CHINA_MARKETS = ('2810', 'China Markets', Scopes.COUNTRY)
    COUNTRY_DEVELOPED_MARKETS = ('1900', 'Developed Markets (DM)', Scopes.COUNTRY)
    COUNTRY_EMERGING_MARKETS = ('1901', 'Emerging Markets (EM)', Scopes.COUNTRY)
    COUNTRY_FRONTIER_MARKETS = ('2114', 'Frontier Markets (FM)', Scopes.COUNTRY)
    COUNTRY_GCC_AND_ARABIAN_MARKETS = ('1902', 'GCC and Arabian Markets', Scopes.COUNTRY)
class Formats(enum.Enum):
    """ Response format.
    """
    XML = 'XML'
    CSV = 'CSV'

    @staticmethod
    def get_file_extension(response_format: 'Formats') -> str:
        """ Get suggested file extension (with dot).

        :param response_format: Response format.
        :return: Suggested file extension (with dot).
        :raises KeyError: If ``response_format`` is not a known format.
        """
        extension_by_format = {
            Formats.XML: '.xml',
            Formats.CSV: '.csv',
        }
        return extension_by_format[response_format]
class Currencies(enum.Enum):
    """ Index currency.

    The MSCI Country and Regional Indices are calculated in local currency as well as in USD.
    The concept of a “local currency” calculation excludes the impact of currency fluctuations.

    Member values are numeric string ids (presumably internal msci.com
    currency codes — TODO confirm against downloader).
    """
    def __new__(cls, value: str, description: str):
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}')>"
    LOCAL = ('0', 'Local')
    USD = ('15', 'United States Dollar')
    EUR = ('119', 'Euro')
    GBP = ('18', 'British Pound Sterling')
    JPY = ('10', 'Japanese Yen')
    CAD = ('16', 'Canadian Dollar')
    CHF = ('3', 'Swiss Franc')
    HKD = ('11', 'Hong Kong Dollar')
    AUD = ('1', 'Australian Dollar')
class IndexLevels(enum.Enum):
    """ Index level (or price level).

    Total return indices measure the market performance,
    including price performance and income from regular cash distributions
    (cash dividend payments or capital repayments).

    Gross Daily Total Return: This series approximates the maximum possible reinvestment
    of regular cash distributions (dividends or capital repayments).
    The amount reinvested is the cash distributed to individuals resident in the country of the company,
    but does not include tax credits.

    Net Daily Total Return: This series approximates the minimum possible reinvestment
    of regular cash distributions. Provided that the regular capital repayment is not subject to withholding tax,
    the reinvestment in the Net Daily Total Return is free of withholding tax.

    The Total Return Index that represents the weighted return of the MSCI parent index
    and the cash component.

    The Excess Return Index that represents the return of the Total Return Index
    minus the return of the cash component.
    """
    def __new__(cls, value: str, description: str):
        # Value is a numeric string id; description is kept as an extra attribute.
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}')>"
    PRICE = ('0', 'Price')
    NET = ('41', 'Net')
    GROSS = ('40', 'Gross')
    TOTAL_RETURN = ('51', 'TR (for Risk Control indexes)')
    EXCESS_RETURN = ('53', 'ER (for Risk Control indexes)')
class Frequencies(enum.Enum):
    """ Index values frequency.

    Values are one-letter codes (presumably used directly in requests —
    TODO confirm against downloader).
    """
    DAILY = 'D'
    MONTHLY = 'M'
    YEARLY = 'Y'
class Styles(enum.Enum):
    """ Index style.

    Each member carries a one-letter code and a human-readable description.
    """
    def __new__(cls, value: str, description: str):
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}')>"
    NONE = ('C', 'None')
    GROWTH = ('G', 'Growth')
    VALUE = ('V', 'Value')
class Sizes(enum.Enum):
    """ Index size.

    Each member carries a numeric string id, a description, and the frozen
    set of ``Scopes`` the size applies to (stored as the ``scopes``
    attribute). An empty set presumably means the size is not tied to any
    particular scope — TODO confirm against callers.
    """
    def __new__(cls, value: str, description: str, scopes: typing.FrozenSet[Scopes]):
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        obj.scopes = frozenset(scopes)
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}', {{{','.join(s.description for s in self.scopes)}}})>"
    A_SERIES = ('111', 'A-Series', ())
    REGIONAL_ALL_CAP = ('77', 'All Cap (Large+Mid+Small+Micro Cap)', {Scopes.REGIONAL})
    REGIONAL_ALL_MARKET = ('108', 'All Market', {Scopes.REGIONAL})
    REGIONAL_IMI = ('41', 'IMI (Large+Mid+Small Cap)', {Scopes.REGIONAL})
    REGIONAL_LARGE_CAP = ('37', 'Large Cap', {Scopes.REGIONAL})
    REGIONAL_MICRO_CAP = ('76', 'Micro Cap', {Scopes.REGIONAL})
    REGIONAL_MID_CAP = ('38', 'Mid Cap', {Scopes.REGIONAL})
    PROVISIONAL_IMI = ('119', 'Provisional IMI', ())
    PROVISIONAL_SMALL_CAP = ('99', 'Provisional Small Cap', ())
    PROVISIONAL_STANDARD = ('29', 'Provisional Standard', ())
    REGIONAL_SMID = ('40', 'SMID (Small+Mid Cap)', {Scopes.REGIONAL})
    REGIONAL_SMALL_PLUS_MICRO_CAP = ('79', 'Small + Micro Cap', {Scopes.REGIONAL})
    REGIONAL_SMALL_CAP = ('39', 'Small Cap', {Scopes.REGIONAL})
    REGIONAL_STANDARD = ('36', 'Standard (Large+Mid Cap)', {Scopes.REGIONAL})
    COUNTRY_ALL_CAP = ('75', 'All Cap (Large+Mid+Small+Micro Cap)', {Scopes.COUNTRY})
    COUNTRY_ALL_MARKET = ('107', 'All Market', {Scopes.COUNTRY})
    COUNTRY_IMI = ('35', 'IMI (Large+Mid+Small Cap)', {Scopes.COUNTRY})
    COUNTRY_LARGE_CAP = ('31', 'Large Cap', {Scopes.COUNTRY})
    COUNTRY_MICRO_CAP = ('74', 'Micro Cap', {Scopes.COUNTRY})
    COUNTRY_MID_CAP = ('32', 'Mid Cap', {Scopes.COUNTRY})
    COUNTRY_SMID = ('34', 'SMID (Small+Mid Cap)', {Scopes.COUNTRY})
    COUNTRY_SMALL_PLUS_MICRO_CAP = ('78', 'Small + Micro Cap', {Scopes.COUNTRY})
    COUNTRY_SMALL_CAP = ('33', 'Small Cap', {Scopes.COUNTRY})
    COUNTRY_STANDARD = ('30', 'Standard (Large+Mid Cap)', {Scopes.COUNTRY})
class IndexSuiteGroups(enum.Enum):
    """ Group of index suites.

    Values are the group display names as they appear on msci.com.
    """
    CAPPED = 'Capped'
    DOMESTIC = 'Domestic'
    EQUAL_SECTOR_WEIGHTED = 'Equal Sector Weighted'
    EQUAL_COUNTRY_WEIGHTED = 'Equal Country Weighted'
    ESG = 'ESG'
    FACTOR_HIGH_EXPOSURE = 'Factor-High Exposure'
    FACTOR_HIGH_CAPACITY = 'Factor-High Capacity'
    HEDGED = 'Hedged'
    LISTED_REAL_ESTATE = 'Listed Real Estate'
    MULTI_FACTOR = 'Multi-Factor'
    THEMATIC = 'Thematic'
    WMA_PRIVATE_INVESTOR_INDICES = 'WMA Private Investor Indices'
class IndexSuites(enum.Enum):
    """ Index suite.
    See https://app2.msci.com/products/service/index/indexmaster/indexsuites

    Each member carries a short code, a description, and an optional
    ``IndexSuiteGroups`` member (stored as the ``group`` attribute;
    ``None`` for the ungrouped ``NONE`` suite).
    """
    def __new__(cls, value: str, description: str, group: typing.Optional[IndexSuiteGroups]):
        obj = object.__new__(cls)
        obj._value_ = value
        obj.description = str(description)
        obj.group = group
        return obj
    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: " \
               f"'{self.value}' ('{self.description}', {(None if self.group is None else self.group.value)!r})>"
    NONE = ('C', 'None', None)
    # Capped
    CAPPED_10_TO_40 = ('AA', '10/40', IndexSuiteGroups.CAPPED)
    CAPPED_25_TO_50 = ('2', '25/50', IndexSuiteGroups.CAPPED)
    STANDARD_CAPPED = ('9', 'Standard Capped', IndexSuiteGroups.CAPPED)
    # Domestic
    HONG_KONG_MPF_DOMESTIC = ('6', 'Hong Kong MPF Domestic', IndexSuiteGroups.DOMESTIC)
    HONG_KONG_MPF_HEDGED = ('8', 'Hong Kong MPF Hedged', IndexSuiteGroups.DOMESTIC)
    HONG_KONG_MPF_UNHEDGED = ('7', 'Hong Kong MPF Unhedged', IndexSuiteGroups.DOMESTIC)
    # Equal Sector Weighted
    EQUAL_SECTOR_WEIGHTED = ('ES', 'Equal Sector Weighted', IndexSuiteGroups.EQUAL_SECTOR_WEIGHTED)
    # Equal Country Weighted
    EQUAL_COUNTRY_WEIGHTED = ('EC', 'Equal Country Weighted', IndexSuiteGroups.EQUAL_COUNTRY_WEIGHTED)
    # ESG
    COUNTRY_ESG_LEADERS = ('CA', 'Country ESG LEADERS', IndexSuiteGroups.ESG)
    ESG_CUSTOM = ('E', 'ESG Custom', IndexSuiteGroups.ESG)
    ESG_FOCUS = ('EF', 'ESG Focus', IndexSuiteGroups.ESG)
    ESG_LEADERS = ('B', 'ESG LEADERS', IndexSuiteGroups.ESG)
    ESG_SCREENED = ('SR', 'ESG Screened', IndexSuiteGroups.ESG)
    ESG_UNIVERSAL = ('EU', 'ESG Universal', IndexSuiteGroups.ESG)
    EMPOWERING_WOMEN = ('EW', 'Empowering Women (WIN)', IndexSuiteGroups.ESG)
    ENVIRONMENTAL = ('Z', 'Environmental', IndexSuiteGroups.ESG)
    EX_CONTROVERSIAL_WEAPONS = ('X', 'Ex Controversial Weapons', IndexSuiteGroups.ESG)
    EX_TOBACCO_INVOLVEMENT = ('TB', 'Ex Tobacco Involvement', IndexSuiteGroups.ESG)
    SRI = ('J', 'SRI', IndexSuiteGroups.ESG)
    WOMENS_LEADERSHIP = ('WL', "Women's Leadership", IndexSuiteGroups.ESG)
    # Factor-High Exposure
    BARRA_FACTOR = ('R', 'Barra Factor', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    BUYBACK_YIELD = ('BY', 'Buyback Yield', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    DIVIDEND_MASTERS = ('DM', 'Dividend Masters', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    ENHANCED_VALUE = ('EV', 'Enhanced Value', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    EQUAL_WEIGHTED = ('W', 'Equal Weighted', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    EQUAL_WEIGHTED_BUYBACK_YIELD = ('EY', 'Equal Weighted Buyback Yield', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    FACTOR_ESG = ('FE', 'Factor ESG', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    GDP_WEIGHTED = ('D', 'GDP Weighted', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    GOVERNANCE_QUALITY = ('GQ', 'Governance-Quality', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    HIGH_DIVIDEND_YIELD = ('H', 'High Dividend Yield', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    MARKET_NEUTRAL = ('3', 'Market Neutral', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    MINIMUM_VOLATILITY = ('M', 'Minimum Volatility', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    MOMENTUM = ('1', 'Momentum', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    FACTOR_HIGH_EXPOSURE_OTHER = ('FO', 'Other', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    PRIME_VALUE = ('PV', 'Prime Value', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    QUALITY = ('U', 'Quality', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    RISK_CONTROL = ('P', 'Risk Control', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    RISK_WEIGHTED = ('K', 'Risk Weighted', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    SECTOR_NEUTRAL_QUALITY = ('NQ', 'Sector Neutral Quality', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    TOP_50_DIVIDEND = ('TD', 'Top 50 Dividend', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    TOTAL_SHAREHOLDER_YIELD = ('TS', 'Total Shareholder Yield', IndexSuiteGroups.FACTOR_HIGH_EXPOSURE)
    # Factor-High Capacity
    DIVIDEND_TILT = ('DT', 'Dividend Tilt', IndexSuiteGroups.FACTOR_HIGH_CAPACITY)
    MOMENTUM_TILT = ('MT', 'Momentum Tilt', IndexSuiteGroups.FACTOR_HIGH_CAPACITY)
    QUALITY_TILT = ('QT', 'Quality Tilt', IndexSuiteGroups.FACTOR_HIGH_CAPACITY)
    SIZE_TILT = ('ST', 'Size Tilt', IndexSuiteGroups.FACTOR_HIGH_CAPACITY)
    VALUE_WEIGHTED = ('L', 'Value Weighted', IndexSuiteGroups.FACTOR_HIGH_CAPACITY)
    VOLATILITY_TILT = ('VT', 'Volatility Tilt', IndexSuiteGroups.FACTOR_HIGH_CAPACITY)
    # Hedged
    HEDGED = ('5', 'Hedged', IndexSuiteGroups.HEDGED)
    FACTOR_HEDGED = ('FH', 'Factor Hedged', IndexSuiteGroups.HEDGED)
    ADAPTIVE_HEDGED = ('AH', 'Adaptive Hedged', IndexSuiteGroups.HEDGED)
    ESG_HEDGED = ('EH', 'ESG Hedged', IndexSuiteGroups.HEDGED)
    # Listed Real Estate
    CORE_REAL_ESTATE = ('CE', 'Core Real Estate', IndexSuiteGroups.LISTED_REAL_ESTATE)
    CORE_REAL_ESTATE_FACTOR = ('CF', 'Core Real Estate Factor', IndexSuiteGroups.LISTED_REAL_ESTATE)
    # Multi-Factor
    CUSTOM_FACTOR_MIX = ('CM', 'Custom Factor Mix', IndexSuiteGroups.MULTI_FACTOR)
    FACTOR_MIX_A_SERIES = ('4', 'Factor Mix A-Series', IndexSuiteGroups.MULTI_FACTOR)
    FACTOR_MIX_A_SERIES_CAPPED = ('MA', 'Factor Mix A-Series Capped', IndexSuiteGroups.MULTI_FACTOR)
    DIVERSIFIED_MULTIPLE_FACTOR = ('DF', 'Diversified Multiple-Factor', IndexSuiteGroups.MULTI_FACTOR)
    DIVERSIFIED_FACTOR_MIX = ('FM', 'Diversified Factor Mix', IndexSuiteGroups.MULTI_FACTOR)
    DIVERSIFIED_MULTIPLE_FACTOR_R_SERIES = ('MR', 'Diversified Multiple-Factor R-Series', IndexSuiteGroups.MULTI_FACTOR)
    DIVERSIFIED_MULTIPLE_FACTOR_LOW_VOLATILITY = ('MV', 'Diversified Multiple-Factor Low Volatility',
                                                  IndexSuiteGroups.MULTI_FACTOR)
    DIVERSIFIED_MULTIPLE_5_FACTOR = ('M5', 'Diversified Multiple 5-Factor', IndexSuiteGroups.MULTI_FACTOR)
    DIVERSIFIED_MULTIPLE_3_FACTOR = ('M3', 'Diversified Multiple 3-Factor', IndexSuiteGroups.MULTI_FACTOR)
    ADAPTIVE_MULTIPLE_FACTOR = ('AM', 'Adaptive Multiple Factor', IndexSuiteGroups.MULTI_FACTOR)
    MULTI_FACTOR_OTHER = ('MO', 'Other', IndexSuiteGroups.MULTI_FACTOR)
    # Thematic
    AGRICULTURE_AND_FOOD_CHAIN = ('V', 'Agriculture & Food Chain', IndexSuiteGroups.THEMATIC)
    AGRICULTURE_AND_FOOD_CHAIN_SECTOR_CAPPED = ('Q', 'Agriculture & Food Chain Sector Capped',
                                                IndexSuiteGroups.THEMATIC)
    COMMODITY_PRODUCERS = ('O', 'Commodity Producers', IndexSuiteGroups.THEMATIC)
    COMMODITY_PRODUCERS_SECTOR_CAPPED = ('Y', 'Commodity Producers Sector Capped', IndexSuiteGroups.THEMATIC)
    CONSUMER_DEMAND = ('AB', 'Consumer Demand', IndexSuiteGroups.THEMATIC)
    CONSUMER_GROWTH = ('CG', 'Consumer Growth', IndexSuiteGroups.THEMATIC)
    CYCLICAL_SECTORS = ('CS', 'Cyclical Sectors', IndexSuiteGroups.THEMATIC)
    CYCLICAL_SECTORS_CAPPED = ('CC', 'Cyclical Sectors Capped', IndexSuiteGroups.THEMATIC)
    DEFENSIVE_SECTORS = ('DS', 'Defensive Sectors', IndexSuiteGroups.THEMATIC)
    DEFENSIVE_SECTORS_CAPPED = ('DC', 'Defensive Sectors Capped', IndexSuiteGroups.THEMATIC)
    ECONOMIC_EXPOSURE = ('N', 'Economic Exposure', IndexSuiteGroups.THEMATIC)
    FAITH_BASED = ('F', 'Faith-based', IndexSuiteGroups.THEMATIC)
    INFRASTRUCTURE = ('S', 'Infrastructure', IndexSuiteGroups.THEMATIC)
    INFRASTRUCTURE_CAPPED = ('IC', 'Infrastructure Capped', IndexSuiteGroups.THEMATIC)
    ISLAMIC = ('I', 'Islamic', IndexSuiteGroups.THEMATIC)
    ISLAMIC_M_SERIES_NEW = ('IM', 'Islamic M -Series (New)', IndexSuiteGroups.THEMATIC)
    THEMATIC_OTHER = ('TO', 'Other', IndexSuiteGroups.THEMATIC)
    # WMA Private Investor Indices
    WMA_PRIVATE_INVESTOR_INDICES = ('WM', 'WMA Private Investor Indices',
                                    IndexSuiteGroups.WMA_PRIVATE_INVESTOR_INDICES)
@dataclasses.dataclass
class Context:
    """ Contains the group of parameters that identifies MSCI index apart from its ID.

    This context is used by REST API even if we know exact index ID,
    i.e. context is ambiguous but mandatory.
    """
    style: Styles  # index style
    size: Sizes  # index size
    scope: Scopes  # index scope
    def __init__(self, *, style: Styles, size: Sizes, scope: Scopes):
        # Enum calls validate membership: an unknown value raises ValueError.
        self.style = Styles(style)  # pylint: disable=no-value-for-parameter
        self.size = Sizes(size)  # pylint: disable=no-value-for-parameter
        self.scope = Scopes(scope)  # pylint: disable=no-value-for-parameter
@dataclasses.dataclass
class IndexValue(InstrumentValueProvider):
    """ Container for index history item.

    Note: a custom ``__init__`` is kept under ``@dataclass`` on purpose:
    it validates and coerces arguments instead of the generated one.
    All arguments are keyword-only.
    """
    date: datetime.date  # date of the value
    value: decimal.Decimal  # index value
    index_name: str  # index name
    style: Styles  # index style
    size: Sizes  # index size

    def __init__(self,
                 *,
                 date: datetime.date,
                 value: decimal.Decimal,
                 index_name: str,
                 style: Styles,
                 size: Sizes):
        if not isinstance(date, datetime.date):
            # Fixed message: it previously referenced a nonexistent
            # parameter name ('value_date' instead of 'date').
            raise TypeError("'date' is not date")
        self.date = date
        self.value = decimal.Decimal(value)
        self.index_name = str(index_name)
        # Enum calls validate membership: an unknown value raises ValueError.
        self.style = Styles(style)  # pylint: disable=no-value-for-parameter
        self.size = Sizes(size)  # pylint: disable=no-value-for-parameter

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert this history item to a generic ``InstrumentValue``.

        :param tzinfo: Optional timezone attached to the resulting moment.
        :return: ``InstrumentValue`` with the value at midnight of ``date``.
        """
        return InstrumentValue(
            value=self.value,
            moment=datetime.datetime.combine(self.date, datetime.time.min, tzinfo=tzinfo))
@dataclasses.dataclass
class IndexInfo(InstrumentInfoProvider):
    """ Container for index information.
    """
    index_id: str
    name: str

    def __init__(self, *, index_id: str, name: str):
        # Keyword-only constructor; both attributes are stored as strings.
        self.index_id = str(index_id)
        self.name = str(name)

    def __str__(self):
        return "MSCI index (index_id={0}, name={1})".format(self.index_id, self.name)

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generic instrument info built from this index info. """
        return InstrumentInfo(code=self.index_id, name=self.name)
class MsciIndexesInfoDownloadParameters(typing.NamedTuple):
    """ Container for ``MsciStringDataDownloader.download_instruments_info_string`` parameters.
    """
    market: Markets  # market to list indexes for
    context: Context  # mandatory request context (style, size, scope)
    @classmethod
    def safe_create(
            cls: typing.Type['MsciIndexesInfoDownloadParameters'],
            *,
            market: Markets,
            context: Context) -> 'MsciIndexesInfoDownloadParameters':
        """ Create new instance of ``MsciIndexesInfoDownloadParameters`` with arguments check.

        :param market: Market.
        :param context: Context.
        :return: ``MsciIndexesInfoDownloadParameters`` instance.
        :raises TypeError: If ``context`` is not a ``Context``.
        :raises ValueError: If ``market`` is not a valid ``Markets`` value.
        """
        if not isinstance(context, Context):
            raise TypeError(f"'context' is not Context: {context!r}")
        # Markets(...) validates membership for the market argument.
        return cls(
            market=Markets(market),  # pylint: disable=no-value-for-parameter
            context=context)
@dataclasses.dataclass
class MsciIndexHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``MsciStringDataDownloader.download_instrument_history_string`` parameters.
    """
    # Index identity; marked as the instrument identity for the inspection machinery.
    index_id: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]
    # Request context; replaced from info download parameters when cloning.
    context: Annotated[Context, InstrumentInfoParameter()]
    index_level: IndexLevels
    currency: Currencies
    date_from: Annotated[datetime.date,
                         Description(description="Minimum date of interval to download data. "
                                                 "Usually it equals to the first date of instrument history.")]
    # Volatile: regenerated as "today" on each use; date.max is the stored stub value.
    date_to: Annotated[datetime.date,
                       Description(description="Maximum date of interval to download data. "
                                               "It have to be 'today' or '31-12-9999'."),
                       Volatile(generator=lambda ctx: datetime.date.today(), stub_value=datetime.date.max)]

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[MsciIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'MsciIndexHistoryDownloadParameters':
        """ Clone this instance, replacing attributes from ``info_download_parameters``
        and ``instrument_info`` (see ``generate_from``).
        """
        return MsciIndexHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    @classmethod
    def generate_from(
            cls: typing.Type['MsciIndexHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['MsciIndexHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[MsciIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> 'MsciIndexHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
                 `info_download_parameters` and `instrument_info`.
        """
        # Preference order: instrument_info/info_download_parameters win over
        # the old history parameters; missing sources yield None attributes.
        return cls(
            index_id=((None if history_download_parameters is None else history_download_parameters.index_id)
                      if instrument_info is None
                      else instrument_info.index_id),
            context=((None if history_download_parameters is None else history_download_parameters.context)
                     if info_download_parameters is None
                     else info_download_parameters.context),
            index_level=(None if history_download_parameters is None else history_download_parameters.index_level),
            currency=(None if history_download_parameters is None else history_download_parameters.currency),
            date_from=(None if history_download_parameters is None else history_download_parameters.date_from),
            date_to=(None if history_download_parameters is None else history_download_parameters.date_to),
        )

    @classmethod
    def safe_create(
            cls: typing.Type['MsciIndexHistoryDownloadParameters'],
            *,
            index_id: str,
            context: Context,
            index_level: IndexLevels,
            currency: Currencies,
            date_from: datetime.date,
            date_to: datetime.date) -> 'MsciIndexHistoryDownloadParameters':
        """ Create new instance of ``MsciIndexHistoryDownloadParameters`` with arguments check.

        :param index_id: Index ID.
        :param context: Context.
        :param index_level: Index level.
        :param currency: Currency.
        :param date_from: Download interval beginning.
        :param date_to: Download interval ending.
        :return: ``MsciIndexHistoryDownloadParameters`` instance.
        :raises TypeError: If ``context``/``date_from``/``date_to`` have wrong types.
        :raises ValueError: If ``date_from`` is after ``date_to``.
        """
        if not isinstance(context, Context):
            raise TypeError(f"'context' is not Context: {context!r}")
        if not isinstance(date_from, datetime.date):
            raise TypeError(f"'date_from' is not datetime.date: {date_from!r}")
        if not isinstance(date_to, datetime.date):
            raise TypeError(f"'date_to' is not datetime.date: {date_to!r}")
        if date_from > date_to:
            raise ValueError(f"'date_from' ({date_from.isoformat()}) is greater than 'date_to' ({date_to.isoformat()})")
        return cls(
            index_id=str(index_id),
            context=context,
            index_level=IndexLevels(index_level),  # pylint: disable=no-value-for-parameter
            currency=Currencies(currency),  # pylint: disable=no-value-for-parameter
            date_from=date_from,
            date_to=date_to)
class MsciDownloadParametersFactory(DownloadParametersFactory):
    """ Factory and generators of download parameters for the MSCI source. """

    @property
    def download_history_parameters_class(self) -> typing.Type[MsciIndexHistoryDownloadParameters]:
        """ Class of instrument history download parameters. """
        return MsciIndexHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., MsciIndexHistoryDownloadParameters]:
        """ Checked factory callable for instrument history download parameters. """
        return MsciIndexHistoryDownloadParameters.safe_create

    @property
    def download_info_parameters_class(self):
        """ Class of instruments info download parameters. """
        return MsciIndexesInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Checked factory callable for instruments info download parameters. """
        return MsciIndexesInfoDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[MsciIndexHistoryDownloadParameters],
            info_download_parameters: typing.Optional[MsciIndexesInfoDownloadParameters],
            instrument_info: typing.Optional[IndexInfo]) -> MsciIndexHistoryDownloadParameters:
        """ Delegate to ``MsciIndexHistoryDownloadParameters.generate_from``. """
        parameters_class = self.download_history_parameters_class
        return parameters_class.generate_from(
            history_download_parameters,
            info_download_parameters,
            instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/msci/v1/meta.py
| 0.759671 | 0.154567 |
meta.py
|
pypi
|
import decimal
import typing
import enum
import datetime
import dataclasses
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS, Volatile
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class ResponseFormats(enum.Enum):
    """ Response formats.

    Values are the literal format strings used by the data source
    (presumably passed as a request parameter — confirm against the downloader).
    """
    XML = 'xml'
    CSV = 'csv'
    JSON = 'json'
    HTML = 'html'
class JsonFormats(enum.Enum):
    """ JSON formats (compact or extended representation). """
    COMPACT = 'compact'
    EXTENDED = 'extended'
class Limits(enum.Enum):
    """ Limits of data (page size) in one response.

    Values are the allowed page sizes.
    """
    ONE = 1
    FIVE = 5
    TEN = 10
    TWENTY = 20
    FIFTY = 50
    HUNDRED = 100
class TradeEngine(typing.NamedTuple):
    """ Trade engine description from moex.com. """
    identity: int
    name: str
    title: str

    @classmethod
    def safe_create(
            cls: typing.Type['TradeEngine'],
            *,
            identity: int,
            name: str,
            title: str) -> 'TradeEngine':
        """ Build a ``TradeEngine`` instance, normalizing argument types.

        :param identity: Identity value.
        :param name: Name.
        :param title: Title.
        :return: New ``TradeEngine`` instance.
        """
        normalized = dict(identity=int(identity), name=str(name), title=str(title))
        return cls(**normalized)
class Market(typing.NamedTuple):
    """ Market description from moex.com. """
    identity: int
    trade_engine: TradeEngine
    name: str
    title: str
    marketplace: str

    @classmethod
    def safe_create(
            cls: typing.Type['Market'],
            *,
            identity: int,
            trade_engine: TradeEngine,
            name: str,
            title: str,
            marketplace: str) -> 'Market':
        """ Build a ``Market`` instance, validating and normalizing arguments.

        :param identity: Identity value.
        :param trade_engine: Trade engine.
        :param name: Name.
        :param title: Title.
        :param marketplace: Marketplace.
        :return: New ``Market`` instance.
        :raises TypeError: If ``trade_engine`` is not a ``TradeEngine``.
        """
        if not isinstance(trade_engine, TradeEngine):
            raise TypeError("'trade_engine' is not TradeEngine")
        normalized = dict(
            identity=int(identity),
            trade_engine=trade_engine,
            name=str(name),
            title=str(title),
            marketplace=str(marketplace))
        return cls(**normalized)
class Board(typing.NamedTuple):
    """ Trading board description from moex.com. """
    identity: int
    trade_engine: TradeEngine
    market: Market
    boardid: str
    title: str
    is_traded: bool
    has_candles: bool
    is_primary: bool

    @classmethod
    def safe_create(
            cls: typing.Type['Board'],
            *,
            identity: int,
            trade_engine: TradeEngine,
            market: Market,
            boardid: str,
            title: str,
            is_traded: bool,
            has_candles: bool,
            is_primary: bool) -> 'Board':
        """ Build a ``Board`` instance, validating and normalizing arguments.

        :param identity: Identity value.
        :param trade_engine: Trade engine.
        :param market: Market.
        :param boardid: Board ID.
        :param title: Title.
        :param is_traded: Is board traded.
        :param has_candles: Has board candles.
        :param is_primary: Is board primary.
        :return: New ``Board`` instance.
        :raises TypeError: If ``trade_engine`` or ``market`` has a wrong type.
        """
        # Validate in the same order as declared: engine first, then market.
        for value, expected_type, error_message in (
                (trade_engine, TradeEngine, "'trade_engine' is not TradeEngine"),
                (market, Market, "'market' is not Market")):
            if not isinstance(value, expected_type):
                raise TypeError(error_message)
        return cls(
            identity=int(identity),
            trade_engine=trade_engine,
            market=market,
            boardid=str(boardid),
            title=str(title),
            is_traded=bool(is_traded),
            has_candles=bool(has_candles),
            is_primary=bool(is_primary))
class GlobalIndexData(typing.NamedTuple):
    """ Container for global index data from moex.com.

    Holds the full known sets of engines, markets and boards as immutable tuples.
    """
    trade_engines: typing.Tuple[TradeEngine, ...]
    markets: typing.Tuple[Market, ...]
    boards: typing.Tuple[Board, ...]
@dataclasses.dataclass
class SecurityValue(InstrumentValueProvider):
    """ Single security history item: trade date plus close value. """
    trade_date: datetime.date
    close: decimal.Decimal

    def __init__(self,
                 *,
                 trade_date: datetime.date,
                 close: decimal.Decimal):
        """ Initialize the history item.

        :param trade_date: Trade date.
        :param close: Close value.
        :raises TypeError: If ``trade_date`` is not a ``datetime.date``.
        """
        if isinstance(trade_date, datetime.date):
            self.trade_date = trade_date
        else:
            raise TypeError("'trade_date' is not date")
        self.close = decimal.Decimal(close)

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert to generalized ``InstrumentValue`` at midnight of ``trade_date`` in ``tzinfo``. """
        value_moment = datetime.datetime.combine(self.trade_date, datetime.time.min, tzinfo=tzinfo)
        return InstrumentValue(value=self.close, moment=value_moment)
@dataclasses.dataclass
class SecurityInfo(InstrumentInfoProvider):
    """ Container for security information.
    """
    sec_id: str
    board: Board
    short_name: str
    lot_size: typing.Optional[int]
    sec_name: typing.Optional[str]
    isin: typing.Optional[str]
    lat_name: typing.Optional[str]
    reg_number: typing.Optional[str]
    coupon_period: typing.Optional[int]
    coupon_percent: typing.Optional[float]

    def __init__(self,
                 *,
                 sec_id: str,
                 board: Board,
                 short_name: str,
                 lot_size: typing.Optional[int] = None,
                 sec_name: typing.Optional[str] = None,
                 isin: typing.Optional[str] = None,
                 lat_name: typing.Optional[str] = None,
                 reg_number: typing.Optional[str] = None,
                 coupon_period: typing.Optional[int] = None,
                 coupon_percent: typing.Optional[float] = None):
        """ Initialize security info.

        :param sec_id: Security ID.
        :param board: Board the security is traded on.
        :param short_name: Short name.
        :param lot_size: Optional lot size.
        :param sec_name: Optional full security name.
        :param isin: Optional ISIN code.
        :param lat_name: Optional latin name.
        :param reg_number: Optional registration number.
        :param coupon_period: Optional coupon period (bonds).
        :param coupon_percent: Optional coupon percent (bonds).
        :raises TypeError: If ``board`` is not a ``Board``.
        """
        if not isinstance(board, Board):
            raise TypeError("'board' is not Board")
        self.sec_id = str(sec_id)
        self.board = board
        self.short_name = str(short_name)
        # Optional attributes stay None when not provided; otherwise normalized.
        self.lot_size = None if lot_size is None else int(lot_size)
        self.sec_name = None if sec_name is None else str(sec_name)
        self.isin = None if isin is None else str(isin)
        self.lat_name = None if lat_name is None else str(lat_name)
        self.reg_number = None if reg_number is None else str(reg_number)
        self.coupon_period = None if coupon_period is None else int(coupon_period)
        self.coupon_percent = None if coupon_percent is None else float(coupon_percent)

    def __str__(self):
        return (f"MOEX security ("
                f"sec_id={self.sec_id!r}, "
                f"short_name={self.short_name!r}, "
                f"sec_name={self.sec_name!r}, "
                f"isin={self.isin!r}, "
                f"lat_name={self.lat_name!r}, "
                f"lot_size={self.lot_size}, "
                f"engine={self.board.trade_engine.name!r}, "
                f"market={self.board.market.name!r}, "
                f"board={self.board.boardid!r}, "
                f"reg_number={self.reg_number!r}, "
                f"coupon_period={self.coupon_period}, "
                f"coupon_percent={self.coupon_percent})")

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generalized instrument info: ``code`` is the security ID, ``name`` is the short name. """
        return InstrumentInfo(code=self.sec_id, name=self.short_name)
class MoexSecuritiesInfoDownloadParameters(typing.NamedTuple):
    """ Parameters container for ``MoexStringDataDownloader.download_instruments_info_string``. """
    board: Board

    @classmethod
    def safe_create(
            cls: typing.Type['MoexSecuritiesInfoDownloadParameters'],
            *,
            board: Board) -> 'MoexSecuritiesInfoDownloadParameters':
        """ Build instance after verifying that ``board`` is a ``Board``.

        :param board: Board.
        :return: New ``MoexSecuritiesInfoDownloadParameters`` instance.
        :raises TypeError: If ``board`` is not a ``Board``.
        """
        if isinstance(board, Board):
            return cls(board=board)
        raise TypeError("'board' is not Board")
@dataclasses.dataclass
class MoexSecurityHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``MoexStringDataDownloader.download_instrument_history_string`` parameters.
    """
    # Board of the security; replaced from info download parameters when cloning.
    board: Annotated[Board, InstrumentInfoParameter()]
    # Security identity; marked as the instrument identity for the inspection machinery.
    sec_id: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]
    # Volatile paging offset: regenerated as 0 on each use, stub value is 0.
    start: Annotated[int, Volatile(generator=lambda ctx: 0, stub_value=0)]

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[MoexSecuritiesInfoDownloadParameters],
            instrument_info: typing.Optional[SecurityInfo]) -> 'MoexSecurityHistoryDownloadParameters':
        """ Clone this instance, replacing attributes from ``info_download_parameters``
        and ``instrument_info`` (see ``generate_from``).
        """
        return MoexSecurityHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    @classmethod
    def generate_from(
            cls: typing.Type['MoexSecurityHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['MoexSecurityHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[MoexSecuritiesInfoDownloadParameters],
            instrument_info: typing.Optional[SecurityInfo]) -> 'MoexSecurityHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
                 `info_download_parameters` and `instrument_info`.
        """
        # Preference order: info_download_parameters/instrument_info win over
        # the old history parameters; 'start' always defaults to 0 when absent.
        return cls(
            board=((None if history_download_parameters is None else history_download_parameters.board)
                   if info_download_parameters is None
                   else info_download_parameters.board),
            sec_id=((None if history_download_parameters is None else history_download_parameters.sec_id)
                    if instrument_info is None
                    else instrument_info.sec_id),
            start=(0 if history_download_parameters is None else history_download_parameters.start)
        )

    @classmethod
    def safe_create(
            cls: typing.Type['MoexSecurityHistoryDownloadParameters'],
            *,
            board: Board,
            sec_id: str,
            start: int) -> 'MoexSecurityHistoryDownloadParameters':
        """ Create new instance of ``MoexSecurityHistoryDownloadParameters`` with arguments check.

        :param board: Board.
        :param sec_id: Security ID.
        :param start: Start value.
        :return: ``MoexSecurityHistoryDownloadParameters`` instance.
        :raises TypeError: If ``board`` is not a ``Board``.
        """
        if not isinstance(board, Board):
            raise TypeError(f"{board!r} is not Board")
        return cls(
            board=board,
            sec_id=str(sec_id),
            start=int(start))
class MoexDownloadParametersFactory(DownloadParametersFactory):
    """ Factory and generators of download parameters for the Moscow Exchange source. """

    @property
    def download_history_parameters_class(self) -> typing.Type[MoexSecurityHistoryDownloadParameters]:
        """ Class of instrument history download parameters. """
        return MoexSecurityHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., MoexSecurityHistoryDownloadParameters]:
        """ Checked factory callable for instrument history download parameters. """
        return MoexSecurityHistoryDownloadParameters.safe_create

    @property
    def download_info_parameters_class(self):
        """ Class of instruments info download parameters. """
        return MoexSecuritiesInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Checked factory callable for instruments info download parameters. """
        return MoexSecuritiesInfoDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[MoexSecurityHistoryDownloadParameters],
            info_download_parameters: typing.Optional[MoexSecuritiesInfoDownloadParameters],
            instrument_info: typing.Optional[SecurityInfo]) -> MoexSecurityHistoryDownloadParameters:
        """ Delegate to ``MoexSecurityHistoryDownloadParameters.generate_from``. """
        parameters_class = self.download_history_parameters_class
        return parameters_class.generate_from(
            history_download_parameters,
            info_download_parameters,
            instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/moex/v1_3/meta.py
| 0.735357 | 0.192046 |
meta.py
|
pypi
|
import dataclasses
import datetime
import decimal
import enum
import typing
from ...base import (
InstrumentValue, InstrumentInfo, InstrumentValueProvider, InstrumentInfoProvider,
InstrumentHistoryDownloadParameters, DownloadParametersFactory)
from ...inspection import InstrumentInfoParameter
from ....annotations import LEGACY_ANNOTATIONS
if LEGACY_ANNOTATIONS: # pragma: no cover
from ....annotations import Annotated
else: # pragma: no cover
from typing import Annotated # pylint: disable=no-name-in-module
class InfoFieldNames(enum.Enum):
    """ Field names in info JSON.

    Values are the literal JSON keys of the iShares product info payload.
    """
    LOCAL_EXCHANGE_TICKER = 'localExchangeTicker'
    ISIN = 'isin'
    FUND_NAME = 'fundName'
    INCEPTION_DATE = 'inceptionDate'
    # NOTE(review): presumably a nested key inside the inception date object —
    # confirm against the actual iShares JSON payload.
    INCEPTION_DATE_R = 'r'
    PRODUCT_PAGE_URL = 'productPageUrl'
@dataclasses.dataclass
class PerformanceValue(InstrumentValueProvider):
    """ Single instrument history value: date plus decimal value. """
    date: datetime.date
    value: decimal.Decimal

    def __init__(self,
                 *,
                 date: datetime.date,
                 value: decimal.Decimal):
        """ Initialize the history value.

        :param date: Date of the value.
        :param value: Value at ``date``.
        :raises TypeError: If ``date`` is not a ``datetime.date``.
        """
        if isinstance(date, datetime.date):
            self.date = date
        else:
            raise TypeError("'date' is not datetime.date")
        self.value = decimal.Decimal(value)

    def __str__(self):
        return (f"iShares performance value("
                f"date={self.date.isoformat()}, "
                f"value={self.value})")

    def get_instrument_value(self, tzinfo: typing.Optional[datetime.timezone]) -> InstrumentValue:
        """ Convert to generalized ``InstrumentValue`` at midnight of ``date`` in ``tzinfo``. """
        combined_moment = datetime.datetime.combine(self.date, datetime.time.min, tzinfo)
        return InstrumentValue(value=self.value, moment=combined_moment)
@dataclasses.dataclass
class ProductInfo(InstrumentInfoProvider):
    """ Container for iShares product (instrument) information. """
    local_exchange_ticker: str
    isin: str
    fund_name: str
    inception_date: datetime.date
    product_page_url: str

    def __init__(self,
                 *,
                 local_exchange_ticker: str,
                 isin: str,
                 fund_name: str,
                 inception_date: datetime.date,
                 product_page_url: str):
        """ Initialize product info.

        :param local_exchange_ticker: Ticker on the local exchange.
        :param isin: ISIN code.
        :param fund_name: Fund name.
        :param inception_date: Inception date.
        :param product_page_url: Product page URL.
        :raises TypeError: If ``inception_date`` is not a ``datetime.date``.
        """
        if not isinstance(inception_date, datetime.date):
            raise TypeError("'inception_date' is not datetime.date")
        self.inception_date = inception_date
        # All remaining attributes are normalized to str uniformly.
        for attr_name, attr_value in (('local_exchange_ticker', local_exchange_ticker),
                                      ('isin', isin),
                                      ('fund_name', fund_name),
                                      ('product_page_url', product_page_url)):
            setattr(self, attr_name, str(attr_value))

    def __str__(self):
        return (f"iShares instrument("
                f"local_exchange_ticker={self.local_exchange_ticker}, "
                f"isin={self.isin}, "
                f"fund_name={self.fund_name}, "
                f"inception_date={self.inception_date.isoformat()}, "
                f"product_page_url={self.product_page_url})")

    @property
    def instrument_info(self) -> InstrumentInfo:
        """ Generalized instrument info: ``code`` is the ticker, ``name`` is the fund name. """
        return InstrumentInfo(code=self.local_exchange_ticker, name=self.fund_name)
class ISharesInstrumentInfoDownloadParameters(typing.NamedTuple):
    """ Parameters container for ``ISharesStringDataDownloader.download_instruments_info_string``.

    The iShares info request needs no parameters, hence the empty tuple.
    """

    @classmethod
    def safe_create(
            cls: typing.Type['ISharesInstrumentInfoDownloadParameters']) -> 'ISharesInstrumentInfoDownloadParameters':
        """ Build a new (empty) ``ISharesInstrumentInfoDownloadParameters`` instance.

        :return: ``ISharesInstrumentInfoDownloadParameters`` instance.
        """
        return cls()
@dataclasses.dataclass
class ISharesInstrumentHistoryDownloadParameters(InstrumentHistoryDownloadParameters):
    """ Container for ``ISharesStringDataDownloader.download_instrument_history_string`` parameters.
    """
    # Product page URL; marked as the instrument identity for the inspection machinery.
    product_page_url: Annotated[str, InstrumentInfoParameter(instrument_identity=True)]

    def clone_with_instrument_info_parameters(
            self,
            info_download_parameters: typing.Optional[ISharesInstrumentInfoDownloadParameters],
            instrument_info: typing.Optional[ProductInfo]) -> 'ISharesInstrumentHistoryDownloadParameters':
        """ Clone this instance, replacing attributes from ``info_download_parameters``
        and ``instrument_info`` (see ``generate_from``).
        """
        return ISharesInstrumentHistoryDownloadParameters.generate_from(self, info_download_parameters, instrument_info)

    # noinspection PyUnusedLocal
    @classmethod
    def generate_from(
            cls: typing.Type['ISharesInstrumentHistoryDownloadParameters'],
            history_download_parameters: typing.Optional['ISharesInstrumentHistoryDownloadParameters'],
            info_download_parameters: typing.Optional[ISharesInstrumentInfoDownloadParameters],
            instrument_info: typing.Optional[ProductInfo]) -> 'ISharesInstrumentHistoryDownloadParameters':
        """ Create new history download parameters instance with data from its arguments.

        :param history_download_parameters: Optional instrument history download parameters for cloning.
        :param info_download_parameters: Optional instrument info download parameters for cloning.
        :param instrument_info: Optional instrument info for cloning.
        :return: Cloned history download parameters instance (self) with replacing some attributes from
                 `info_download_parameters` and `instrument_info`.
        """
        # instrument_info wins over the old history parameters when present.
        return cls(
            product_page_url=(
                (None if history_download_parameters is None else history_download_parameters.product_page_url)
                if instrument_info is None
                else instrument_info.product_page_url))

    @classmethod
    def safe_create(
            cls: typing.Type['ISharesInstrumentHistoryDownloadParameters'],
            *,
            product_page_url: str) -> 'ISharesInstrumentHistoryDownloadParameters':
        """ Create new instance of ``ISharesInstrumentHistoryDownloadParameters`` with arguments check.

        :param product_page_url: Product page URL.
        :return: ``ISharesInstrumentHistoryDownloadParameters`` instance.
        """
        return cls(product_page_url=str(product_page_url))
class ISharesDownloadParametersFactory(DownloadParametersFactory):
    """ Factory and generators of download parameters for the iShares source. """

    @property
    def download_history_parameters_class(self) -> typing.Type[ISharesInstrumentHistoryDownloadParameters]:
        """ Class of instrument history download parameters. """
        return ISharesInstrumentHistoryDownloadParameters

    @property
    def download_history_parameters_factory(self) -> typing.Callable[..., ISharesInstrumentHistoryDownloadParameters]:
        """ Checked factory callable for instrument history download parameters. """
        return ISharesInstrumentHistoryDownloadParameters.safe_create

    @property
    def download_info_parameters_class(self):
        """ Class of instruments info download parameters. """
        return ISharesInstrumentInfoDownloadParameters

    @property
    def download_info_parameters_factory(self) -> typing.Callable[..., typing.Any]:
        """ Checked factory callable for instruments info download parameters. """
        return ISharesInstrumentInfoDownloadParameters.safe_create

    def generate_history_download_parameters_from(
            self,
            history_download_parameters: typing.Optional[ISharesInstrumentHistoryDownloadParameters],
            info_download_parameters: typing.Optional[ISharesInstrumentInfoDownloadParameters],
            instrument_info: typing.Optional[ProductInfo]) -> ISharesInstrumentHistoryDownloadParameters:
        """ Delegate to ``ISharesInstrumentHistoryDownloadParameters.generate_from``. """
        parameters_class = self.download_history_parameters_class
        return parameters_class.generate_from(
            history_download_parameters,
            info_download_parameters,
            instrument_info)
|
/sane_finances-2.0-py3-none-any.whl/sane_finances/sources/ishares/v2021/meta.py
| 0.750461 | 0.151624 |
meta.py
|
pypi
|
import sys
from typing import Optional, Iterable, NoReturn
from sane_out.colour import encode_ansi
class _SanePrinter:
    """
    The SanePrinter class, responsible for the whole console output.

    sane-out exports a "default" instance of SanePrinter; the class can be used to
    create different implementations. Although it is also acceptable to modify the
    module-provided version.
    """

    def __init__(self, verbose: bool = False, colour: bool = True) -> None:
        """
        Creates an instance of SanePrinter.

        :param verbose: whether the debug messages will be output
        :param colour: whether to colour the message
        """
        self.verbose = verbose
        self.colour = colour

    def _print(
        self,
        message: str = "",
        colour_code: Optional[Iterable[int]] = None,
        err: bool = False,
    ):
        """Prints a message to a specified stream / file.

        The method can be given the sequence of ANSI codes to colour the message with.
        Since colour.py offers only foreground colors and the bright modifier, most uses
        will include the optional bright modifier sequence and one colour sequence.

        >>> _SanePrinter()._print("Hello World")
        Hello World
        >>> _SanePrinter()._print("Hello Cyan World", [36])
        \x1b[36mHello Cyan World\x1b[0m

        The message is printed to stdout by default; one can specify a different stream:

        >>> _SanePrinter()._print("Hello stderr", err=True)
        (doctest doesn't support stderr testing...)

        :param message: message to print
        :param colour_code: sequence of ANSI codes to style the message, or None to
            print the message without colours
        :param err: if True, write the message to stderr instead of stdout
        """
        if err:
            stream = sys.stderr
        else:
            stream = sys.stdout
        # Colour only when enabled on the instance AND a code sequence was given.
        coloured = self.colour and colour_code is not None
        if message and coloured:
            message = f"{encode_ansi(*colour_code)}{message}{encode_ansi(0)}"
        message += "\n"
        if message:
            stream.write(message)
            stream.flush()

    def debug(self, message: str = ""):
        """
        Prints a debug message.

        The message is printed to the stdout in bright black (grey) colour. The message
        is printed only when verbose=True:

        >>> _SanePrinter().debug("Debug Message")
        >>> _SanePrinter(verbose=True).debug("Debug Message")
        \x1b[30;1mDebug Message\x1b[0m

        :param message: message to print
        """
        if self.verbose:
            self._print(message, [30, 1])

    def __call__(self, message: str = ""):
        """
        Prints a simple message.

        The message is printed to stdout without any ANSI tags.
        As you may notice, this is analogue to info(); this way, SanePrinter objects
        can be used as functions — a handy shortcut!

        >>> _SanePrinter()("Hello World")
        Hello World

        :param message: message to print
        """
        self.info(message)

    def info(self, message: str = ""):
        """
        Prints a simple message to the console.

        The message gets printed to stdout without any ANSI tags.

        >>> _SanePrinter().info("Hello World")
        Hello World

        :param message: message to print
        """
        self._print(message)

    def warning(self, message: str = ""):
        """
        Prints a warning message.

        The message is printed to the stderr in yellow colour.

        :param message: message to print
        """
        self._print(message, [33], err=True)

    def error(self, message: str = "", exit_code: int = -1) -> NoReturn:
        """
        Prints an error message and quits the program.

        The message is printed to the stderr in red colour.

        :param message: message to print
        :param exit_code: code to exit the program with
        """
        self.calm_error(message)
        sys.exit(exit_code)

    def calm_error(self, message: str = ""):
        """
        Prints an error message without quitting.

        The message is printed to the stderr in red colour.

        :param message: message to print
        """
        self._print(message, [31], err=True)
|
/sane_out-0.2.1-py3-none-any.whl/sane_out/printer.py
| 0.767603 | 0.39004 |
printer.py
|
pypi
|
import numpy
from pathlib import Path
import io
import typing
def read_uint32(f: io.BufferedReader) -> int:
    """ Read a little-endian unsigned 32-bit integer from *f*. """
    raw = f.read(4)
    return int.from_bytes(raw, byteorder="little", signed=False)
def write_uint32(f: io.BufferedWriter, n: int) -> int:
    """ Write *n* to *f* as a little-endian unsigned 32-bit integer; return bytes written. """
    encoded = int.to_bytes(n, length=4, byteorder="little", signed=False)
    return f.write(encoded)
def read_uint64(f: io.BufferedReader) -> int:
    """ Read a little-endian unsigned 64-bit integer from *f*. """
    raw = f.read(8)
    return int.from_bytes(raw, byteorder="little", signed=False)
def write_uint64(f: io.BufferedWriter, n: int) -> int:
    """ Write *n* to *f* as a little-endian unsigned 64-bit integer; return bytes written. """
    encoded = int.to_bytes(n, length=8, byteorder="little", signed=False)
    return f.write(encoded)
# Mapping from the SANE format's dtype tag byte to the numpy dtype it denotes.
dtypes = {
    0: numpy.dtype("float32"),
    1: numpy.dtype("int32"),
    2: numpy.dtype("uint32"),
    3: numpy.dtype("float64"),
    4: numpy.dtype("int64"),
    5: numpy.dtype("uint64"),
    6: numpy.dtype("int8"),
    7: numpy.dtype("uint8"),
}
# Reverse lookup: (dtype kind, itemsize) -> SANE dtype tag byte.
# Keyed by (kind, itemsize) so that byte order does not affect the lookup.
dtype_bytes = {(v.kind, v.itemsize): k for k, v in dtypes.items()}
def load_reader(f: io.BufferedReader) -> typing.Optional[numpy.ndarray]:
    """ Read one SANE-encoded array from *f*.

    :param f: Binary reader positioned at the start of a SANE record.
    :return: The decoded array, or ``None`` if the magic header is absent
        (e.g. at end of stream).
    :raises ValueError: If the record declares an unsupported dtype tag.
    :raises OSError: If the payload is shorter than its declared length.
    """
    magic = f.read(4)
    if magic != b"SANE":
        return None
    shapelen = read_uint32(f)
    # Dimensions are stored innermost-first in the SANE format, hence reversed.
    shape = list(reversed([read_uint64(f) for _ in range(shapelen)]))
    dtype_byte = int.from_bytes(f.read(1), byteorder="little", signed=False)
    dtype = dtypes.get(dtype_byte)
    if dtype is None:
        raise ValueError(f"Got unsupported SANE data type {dtype_byte}.")
    # BUG FIX: dtype.newbyteorder() returns a NEW dtype (dtypes are immutable);
    # the original code discarded the result, so the payload was interpreted
    # with native byte order — wrong on big-endian hosts.
    dtype = dtype.newbyteorder("<")
    payloadlen = read_uint64(f)
    # allocate memory for the array and read the payload directly into it
    array = numpy.empty(shape=shape, dtype=dtype)
    bytesread = f.readinto(array.data)
    if bytesread != payloadlen:
        raise OSError(f"Expected {payloadlen} bytes, but only got {bytesread}.")
    return array
def load_arrays(f: io.BufferedReader) -> typing.Generator[numpy.ndarray, None, None]:
    """ Yield consecutive SANE arrays from *f* until the stream is exhausted. """
    array = load_reader(f)
    while array is not None:
        yield array
        array = load_reader(f)
def load(path: Path) -> numpy.ndarray:
    """ Load a single SANE array from the file at *path*. """
    with open(path, "rb") as reader:
        result = load_reader(reader)
        assert result is not None
        return result
def save_writer(f: io.BufferedWriter, array: numpy.ndarray) -> None:
    """ Write *array* to *f* in SANE format (little-endian payload).

    :param f: Binary writer to receive the record.
    :param array: Array to serialize.
    :raises ValueError: If the array's dtype has no SANE tag byte.
    """
    f.write(b"SANE")
    write_uint32(f, len(array.shape))
    # Dimensions are stored innermost-first in the SANE format.
    for dim in reversed(array.shape):
        write_uint64(f, dim)
    dtype_byte = dtype_bytes.get((array.dtype.kind, array.dtype.itemsize))
    if dtype_byte is None:
        raise ValueError(f"Cannot save {array.dtype.type} data as a SANE array.")
    f.write(int.to_bytes(dtype_byte, length=1, byteorder="little", signed=False))
    # BUG FIX: the original `array.newbyteorder("<")` merely relabeled the bytes
    # (producing wrong output on big-endian hosts) and ndarray.newbyteorder was
    # removed in NumPy 2.0.  astype() actually converts; copy=False avoids a
    # copy when the data is already little-endian.
    little = array.astype(array.dtype.newbyteorder("<"), copy=False)
    write_uint64(f, little.data.nbytes)
    f.write(little.data)
def save_arrays(f: io.BufferedWriter, arrays: typing.Iterable[numpy.ndarray]) -> None:
    """ Write each array of *arrays* to *f*, one SANE record after another. """
    for current_array in arrays:
        save_writer(f, current_array)
def save(path: Path, array: numpy.ndarray) -> None:
    """Write a single SANE array to the file at *path*, truncating it first."""
    with open(path, "wb") as stream:
        save_writer(stream, array)
|
/sane-python-0.1.3.tar.gz/sane-python-0.1.3/sane_python/io.py
| 0.726329 | 0.486027 |
io.py
|
pypi
|
# sanetime_py3
**A sane date/time python interface:** better epoch time, timezones, and deltas, django support as well
## Forked from hubspot 'sanetime'
To support python3
## intro
**sanetime** was written to DRY up all the common date/time manipulations we all do constantly in our code while offering the most simple, versatile, and intuitive client possible.
We've all learned that the only sane way to store times is using epoch time. (You have, haven't you?)
Unfortunately, manipulating epoch time and timezones with the standard python toolset requires getting up to speed on a menagerie of python modules and concepts: datetime, date, time, calendar, pytz, dateutils, timedelta, time tuples, localize, normalize.
**sanetime** seeks to bring more sanity to the manipulations of epoch time, timezone, time delta, and time generally.
``` python
>>> from sanetime import time,delta # a tiny taste
>>> time('2012-05-01 22:31',tz='America/New_York').millis
1335925860000
>>> str(time(tz='Europe/London')) # now in London
'2012-05-29 15:28:05.178741 +Europe/London'
>>> (time(2012,6,1) - time('2012-05-01')).hours
744
>>> (time() + delta(h=12)).s # epoch seconds 12 hours from now
1338344977
```
## concepts
### time
The `time` class represents a moment in time, internally stored as microseconds since epoch.
A `time` object also has an associated timezone (UTC by default), however the timezone will never be considered during hashing, comparison or equality checks,
i.e. A moment in `time` experienced in America/New\_York is equal to the same moment in `time` experienced in Europe/Dublin.
### tztime
The `tztime` class is exactly like the `time` object, except that timezone **does** factor into equality, comparison, and hashing.
A moment in `tztime` experienced in America/New\_York is **not** the same as the same `tztime` moment experienced in Europe/Dublin.
### delta
The `delta` class represents a period of time, and provides easy access to all the different ways you might slice and dice this:
micros, millis, seconds, minutes, hours, mean\_days, mean\_weeks, mean\_months, mean\_years.
There are also many different flavors of these: rounded, floored, floated, positional, rounded\_positional.
There is no attempt made in delta yet to be calendar aware (hence the 'mean' prefixes in some cases).
### span
The `span` class represents a window of time ranging from one specific moment in time to another specific moment in time.
You can think of it as a start `time` with a `delta`, or as a start `time` and a stop `time`.
### django
A django model field is also provided: `SaneTimeField`, that makes it super simple to store a sanetime.
They honor the auto\_add and auto\_add\_now features to easily turn your sanetimes into updated\_at or created\_at fields.
And they even work with south out of the box.
## details
### time `from sanetime import time`
#### construction
You can construct a sanetime object from epoch times, datetimes, date/time parts, or from a parseable string.
Epoch microseconds are assumed when no keyword is given.
Intuitive aliases exist for kwargs, so be as terse or verbose as you want (us = micros = epoch\_micros = epoch\_microseconds):
``` python
>>> time(1338508800000000)
SaneTime(1338508800000000,<UTC>)
>>> time(micros=1338508800000000)
SaneTime(1338508800000000,<UTC>)
>>> time(millis=1338508800000)
SaneTime(1338508800000000,<UTC>)
>>> time(seconds=1338508800)
SaneTime(1338508800000000,<UTC>)
>>> time(minutes=22308480, tz='America/New_York')
SaneTime(1338508800000000,<DstTzInfo 'America/New_York' EST-1 day, 19:00:00 STD>)
```
If you have the calendar parameters, then construct just as you would a datetime:
``` python
>>> time(2012,1,1)
SaneTime(1325376000000000,<UTC>)
>>> time(2012,1,1,12,30,1)
SaneTime(1325421001000000,<UTC>)
>>> time(2012,1,1,12,30,1,1, tz='America/New_York')
SaneTime(1325421001000001,<DstTzInfo 'America/New_York' EST-1 day, 19:00:00 STD>)
```
If you already have a datetime object, just construct from that:
``` python
>>> dt = datetime(2012,1,1)
>>> time(dt)
SaneTime(1325376000000000,<UTC>)
```
Or construct from a parsable string:
``` python
>>> time('January 1st, 2012 12:30:01pm')
SaneTime(1325421001000000,<UTC>)
>>> time('January 1st, 2012 12:30:01pm', tz='America/New_York')
SaneTime(1325421001000000,<DstTzInfo 'America/New_York' EST-1 day, 19:00:00 STD>)
```
#### arithmetic
Adding any int/long assumes it to be in microseconds. You can also add any `delta`:
``` python
>>> time(2012,1,1) + 5
SaneTime(1325376000000005,<UTC>)
>>> time(2012,1,1) + delta(hours=5)
SaneTime(1325394000000000,<UTC>)
```
Subtracting two sanetimes produces a `delta`:
``` python
>>> time() - time(2012,1,1) # time since new year
SaneDelta(15131339063956)
>>> abs(time() - time()).micros # microseconds to construct a time
30
```
#### conversion
You can easily convert to a timezone-aware datetime or to a "naive" datetime. They are accessed as properties.
``` python
>>> time(2012,1,1,tz='America/Los_Angeles').datetime
datetime.datetime(2012, 1, 1, 0, 0, tzinfo=<DstTzInfo 'America/Los_Angeles' PST-1 day, 16:00:00 STD>)
>>> time(2012,1,1,tz='America/Los_Angeles').naive_datetime
datetime.datetime(2012, 1, 1, 0, 0)
```
There are other convenience datetime timezone conversions as well.
``` python
>>> time(2012,1,1,tz='America/Los_Angeles').utc_datetime
datetime.datetime(2012, 1, 1, 8, 0, tzinfo=<UTC>)
>>> time(2012,1,1,tz='America/Los_Angeles').utc_naive_datetime
datetime.datetime(2012, 1, 1, 8, 0)
>>> time(2012,1,1,tz='America/Los_Angeles').ny_datetime
datetime.datetime(2012, 1, 1, 3, 0, tzinfo=<DstTzInfo 'America/New_York' EST-1 day, 19:00:00 STD>)
>>> time(2012,1,1,tz='America/Los_Angeles').ny_naive_datetime
datetime.datetime(2012, 1, 1, 3, 0)
```
To epoch times:
``` python
>>> time(2012,1,1).minutes
22089600
>>> time(2012,1,1).seconds
1325376000
>>> time(2012,1,1).millis
1325376000000
>>> time(2012,1,1).micros
1325376000000000
```
long and int conversion just bring back the epoch microseconds
``` python
>>> int(time(2012,1,1))
1325376000000000
>>> long(time(2012,1,1))
1325376000000000L
```
##### date/time parts
You can get at any of the date parts just as you might with datetime properties. Be careful-- these properties are all singular. Do not confuse with the plural epoch possibilities of the previous section. (this ambiguity will be fixed in future versions)
``` python
>>> time().year
2012
>>> time().month
6
>>> time().day
24
>>> time().hour
3
>>> time().minute
42
>>> time().second
12
>>> time().micro
664819
```
### tztime `from sanetime import tztime`
#### construction
You construct a sanetztime object with all the same possibilities as a sanetime object, but remember, now the timezone matters for equality, comparison, and hashing.
Timezone defaults to UTC if not specified.
``` python
>>> tztime()
SaneTzTime(1358919880245463,<UTC>) # now
>>> tztime(tz='America/New_York') # now in New York
SaneTzTime(1358919987623544,<DstTzInfo 'America/New_York' EST-1 day, 19:00:00 STD>)
>>> tztime(ms=1325376000000, tz='America/New_York')
SaneTzTime(1325376000000000,<DstTzInfo 'America/New_York' EST-1 day, 19:00:00 STD>)
>>> tztime(2012,1,1, tz='America/New_York')
SaneTzTime(1325394000000000,<DstTzInfo 'America/New_York' EST-1 day, 19:00:00 STD>)
```
### delta `from sanetime import delta`
#### construction
Passing no parameters specifies a 0 delta:
``` python
>>> delta()
SaneDelta(0)
```
a raw arg is assumed to be in microseconds:
``` python
>>> delta(1000)
SaneDelta(1000)
```
there are many keyword possibilities -- be as verbose or terse as you want to be -- but whatever you think it should be likely works:
``` python
>>> delta(hours=30)
SaneDelta(108000000000)
>>> delta(s=30)
SaneDelta(30000000)
>>> delta(seconds=30)
SaneDelta(30000000)
>>> delta(secs=30)
SaneDelta(30000000)
```
weeks and beyond can only be specified as "mean\_" weeks, months, etc.
That is because the specific delta of a specific week could be different depending on when the week falls, and the sanetime library has made no attempt to accommodate this yet.
A "mean\_week" is exactly 7*24 hours. A "mean_month" is exactly (365*4+1)/4/12*24 hours. A "mean_year" is exactly (365*4+1)/4*24 hours.
``` python
>>> delta(mean_months=30)
SaneDelta(18144000000000)
```
#### arithmetic
sanedeltas can be added and subtracted from any sanetime or sanetztime as described above.
sanedeltas can also be added and subtracted from one another.
if a raw number is added or subtracted from a delta it is assumed to be in micros.
``` python
>>> delta(h=1) - delta(m=1,s=1)
SaneDelta(3539000000)
>>> delta(ms=1000) - 1000
SaneDelta(999000)
```
#### conversion
delta's can be converted to any epoch number in a number of ways (rounded, whole (i.e. floored), or floated). When unspecified, they are rounded:
``` python
>>> from sanetime import delta
>>> delta(ms=9482923939).minutes # rounded
158049
>>> delta(ms=9482923939).rounded_minutes
158049
>>> delta(ms=9482923939).whole_minutes # floored
158048
>>> delta(ms=9482923939).float_minutes
158048.73231666666
```
you can also slice up deltas into their positional components -- that is, if you wanted to have a delta of 150 seconds show up as 2 minutes and 30 seconds:
``` python
>>> d = delta(s=150)
>>> d.positional_minutes
2
>>> d.positional_seconds
30
```
### span `from sanetime import span`
#### construction
You can construct from either a start and delta or a start and stop time. You must provide a kwarg to do the latter.
``` python
>>> span(time(), delta(s=90))
SaneSpan(start=SaneTime(1358925692752574,<UTC>),delta=SaneDelta(90000000))
>>> span(time(),end=time())
SaneSpan(start=SaneTime(1358925841490454,<UTC>),delta=SaneDelta(37))
```
#### methods
``` python
>>> span(time(), delta(s=90)).overlaps(span(time(),end=time())) # test for overlap
True
```
### django
TODO: write docs (functionality is solid and used without issue in production systems -- just no time for docs yet -- please feel free to help out here)
## other
### docs
Many nice little features are not documented in these pages, and are lying in the code awaiting your discovery. One day we'll get everything documented...
### faq
Why is everything stored internally as microseconds?
Python's datetime gives us access to microseconds, and since milliseconds would already have us cross the 32bit integer boundary, we might as well capture everything we can and take on microseconds as well.
There are plenty of helpers on the time, tztime, and delta that make using epoch seconds or millis just as easy as using micros.
### design principles
* simple: simplify usecases to single method/property
* intuitive: easy to remember methods/properties, with guessable aliases - be as verbose (and communicative) or as terse (and efficient) as you want to be. for example t = time(); t.ms == t.millis == t.milliseconds
* properties whenever sensible: properties are especially useful for django, cuz you can use them directly in templates without having to stage them first in the views.
### links
[sanetime in github](https://github.com/HubSpot/sanetime)
[sanetime in travis](https://travis-ci.org/HubSpot/sanetime)
[sanetime in pypi](http://pypi.python.org/pypi/sanetime)
|
/sanetime_py3-5.0.1.tar.gz/sanetime_py3-5.0.1/README.md
| 0.839603 | 0.977284 |
README.md
|
pypi
|
import os
import re
import subprocess
"""
Utilities to manage requirements files and call pip.
NOTE: this should use ONLY the standard library and not import anything else
because this is used for bootstrapping with no requirements installed.
"""
def load_requirements(requirements_file="requirements.txt", with_unpinned=False):
    """
    Yield package (name, version) tuples for each requirement found in the
    ``requirements_file`` file. Only requirements pinned to an exact version
    are accepted unless ``with_unpinned`` is True.
    """
    with open(requirements_file) as fh:
        lines = fh.read().splitlines(False)
    return get_required_name_versions(lines, with_unpinned=with_unpinned)
def get_required_name_versions(requirement_lines, with_unpinned=False):
    """
    Yield required (name, version) tuples given a ``requirement_lines``
    iterable of requirement text lines. Only accept requirements pinned to an
    exact version unless ``with_unpinned`` is True.
    """
    for line in requirement_lines:
        line = line.strip()
        # Skip blanks and comments silently.
        if not line or line.startswith("#"):
            continue
        # Skip pip options ("-r", "-e", ...) and, unless unpinned specifiers
        # are allowed, anything that is not pinned with "==".
        unsupported = line.startswith("-") or (not with_unpinned and "==" not in line)
        if unsupported:
            print(f"Requirement line is not supported: ignored: {line}")
            continue
        yield get_required_name_version(requirement=line, with_unpinned=with_unpinned)
def get_required_name_version(requirement, with_unpinned=False):
    """
    Return a (name, version) tuple given a ``requirement`` specifier string.
    Requirement version must be pinned. If ``with_unpinned`` is True, unpinned
    requirements are accepted and only the name portion is returned.
    For example:
    >>> assert get_required_name_version("foo==1.2.3") == ("foo", "1.2.3")
    >>> assert get_required_name_version("fooA==1.2.3.DEV1") == ("fooa", "1.2.3.dev1")
    >>> assert get_required_name_version("foo==1.2.3", with_unpinned=False) == ("foo", "1.2.3")
    >>> assert get_required_name_version("foo", with_unpinned=True) == ("foo", "")
    >>> assert get_required_name_version("foo>=1.2", with_unpinned=True) == ("foo", ""), get_required_name_version("foo>=1.2")
    >>> try:
    ...     assert not get_required_name_version("foo", with_unpinned=False)
    ... except Exception as e:
    ...     assert "Requirement version must be pinned" in str(e)
    """
    # Lowercase and strip all whitespace so "Foo == 1.2" becomes "foo==1.2".
    requirement = requirement and "".join(requirement.lower().split())
    # BUG FIX: the original message read "specifier is required is empty".
    assert requirement, f"specifier is required but is empty: {requirement!r}"
    name, operator, version = split_req(requirement)
    assert name, f"Name is required: {requirement}"
    is_pinned = operator == "=="
    if with_unpinned:
        # Callers that accept unpinned requirements only get the name.
        version = ""
    else:
        assert is_pinned and version, f"Requirement version must be pinned: {requirement}"
    return name, version
def lock_requirements(requirements_file="requirements.txt", site_packages_dir=None):
    """
    Freeze the requirements currently installed in ``site_packages_dir`` and
    write them, pinned, to the ``requirements_file`` requirements file.
    """
    pinned = get_installed_reqs(site_packages_dir=site_packages_dir)
    with open(requirements_file, "w") as out:
        out.write(pinned)
def lock_dev_requirements(
    dev_requirements_file="requirements-dev.txt",
    main_requirements_file="requirements.txt",
    site_packages_dir=None,
):
    """
    Freeze and lock the development-only requirements into
    ``dev_requirements_file``. "Development-only" means packages currently
    installed whose names (versions ignored) do not appear in the
    ``main_requirements_file`` requirements file.
    """
    main_names = {name for name, _ver in load_requirements(main_requirements_file)}
    installed = get_installed_reqs(site_packages_dir=site_packages_dir)
    dev_only = {
        name: version
        for name, version in get_required_name_versions(installed.splitlines(False))
        if name not in main_names
    }
    content = "\n".join(f"{name}=={version}" for name, version in sorted(dev_only.items()))
    with open(dev_requirements_file, "w") as out:
        out.write(content)
def get_installed_reqs(site_packages_dir):
    """
    Return the installed pip requirements found in `site_packages_dir` as
    text (the output of ``pip freeze``).

    Raise an Exception when `site_packages_dir` does not exist.
    """
    if not os.path.exists(site_packages_dir):
        # BUG FIX: message previously read "does not exists".
        raise Exception(f"site_packages directory: {site_packages_dir!r} does not exist")
    # Also include these packages in the output with --all: wheel, distribute,
    # setuptools, pip
    args = ["pip", "freeze", "--exclude-editable", "--all", "--path", site_packages_dir]
    return subprocess.check_output(args, encoding="utf-8")
# Known version comparators, ordered longest-first so the regex alternation
# below matches "===" before "==", and "<="/">=" before "<"/">".
comparators = ("===", "~=", "!=", "==", "<=", ">=", ">", "<")

_comparators_re = r"|".join(comparators)

# The comparator is captured so re.split() keeps it in the result list.
version_splitter = re.compile(rf"({_comparators_re})")
def split_req(req):
    """
    Return a three-tuple of (name, comparator, version) given a ``req``
    requirement specifier string. Each segment may be empty. Spaces are removed.
    For example:
    >>> assert split_req("foo==1.2.3") == ("foo", "==", "1.2.3"), split_req("foo==1.2.3")
    >>> assert split_req("foo") == ("foo", "", ""), split_req("foo")
    >>> assert split_req("==1.2.3") == ("", "==", "1.2.3"), split_req("==1.2.3")
    >>> assert split_req("foo >= 1.2.3 ") == ("foo", ">=", "1.2.3"), split_req("foo >= 1.2.3 ")
    >>> assert split_req("foo>=1.2") == ("foo", ">=", "1.2"), split_req("foo>=1.2")
    """
    assert req
    # Multiple constraints ("foo>=1,<2") and environment markers (";") are
    # deliberately unsupported.
    assert not any(c in req for c in ",;")
    compact = "".join(req.split())
    has_comparator = any(op in compact for op in comparators)
    if not has_comparator:
        return compact, "", ""
    return tuple(version_splitter.split(compact, maxsplit=1))
|
/saneyaml-0.6.0.tar.gz/saneyaml-0.6.0/etc/scripts/utils_requirements.py
| 0.746416 | 0.286809 |
utils_requirements.py
|
pypi
|
import re
"""
Wheel platform checking
Copied and modified on 2020-12-24 from
https://github.com/pypa/warehouse/blob/37a83dd342d9e3b3ab4f6bde47ca30e6883e2c4d/warehouse/forklift/legacy.py
This contains the basic functions to check if a wheel file name is would be
supported for uploading to PyPI.
"""
# These platforms can be handled by a simple static list:
_allowed_platforms = {
"any",
"win32",
"win_amd64",
"win_ia64",
"manylinux1_x86_64",
"manylinux1_i686",
"manylinux2010_x86_64",
"manylinux2010_i686",
"manylinux2014_x86_64",
"manylinux2014_i686",
"manylinux2014_aarch64",
"manylinux2014_armv7l",
"manylinux2014_ppc64",
"manylinux2014_ppc64le",
"manylinux2014_s390x",
"linux_armv6l",
"linux_armv7l",
}
# macosx is a little more complicated:
_macosx_platform_re = re.compile(r"macosx_(?P<major>\d+)_(\d+)_(?P<arch>.*)")
_macosx_arches = {
"ppc",
"ppc64",
"i386",
"x86_64",
"arm64",
"intel",
"fat",
"fat32",
"fat64",
"universal",
"universal2",
}
_macosx_major_versions = {
"10",
"11",
}
# manylinux pep600 is a little more complicated:
_manylinux_platform_re = re.compile(r"manylinux_(\d+)_(\d+)_(?P<arch>.*)")
_manylinux_arches = {
"x86_64",
"i686",
"aarch64",
"armv7l",
"ppc64",
"ppc64le",
"s390x",
}
def is_supported_platform_tag(platform_tag):
    """
    Return True if the ``platform_tag`` is supported on PyPI.
    """
    # Fast path: the static allow-list.
    if platform_tag in _allowed_platforms:
        return True
    # macosx_<major>_<minor>_<arch> with a known major version and arch.
    macosx = _macosx_platform_re.match(platform_tag)
    if macosx and macosx.group("major") in _macosx_major_versions:
        if macosx.group("arch") in _macosx_arches:
            return True
    # manylinux_<glibc-major>_<glibc-minor>_<arch> (PEP 600) with a known arch.
    manylinux = _manylinux_platform_re.match(platform_tag)
    return bool(manylinux and manylinux.group("arch") in _manylinux_arches)
def validate_platforms_for_pypi(platforms):
    """
    Validate if the wheel platforms are supported platform tags on Pypi. Return
    a list of unsupported platform tags or an empty list if all tags are
    supported.
    """
    # Check that if it's a binary wheel, it's on a supported platform.
    # (Idiom: manual append loop replaced by an equivalent comprehension.)
    return [plat for plat in platforms if not is_supported_platform_tag(plat)]
|
/saneyaml-0.6.0.tar.gz/saneyaml-0.6.0/etc/scripts/utils_pypi_supported_tags.py
| 0.692434 | 0.410756 |
utils_pypi_supported_tags.py
|
pypi
|
from typing import List
from sangfor_af_sdk.Common.BaseObject import BaseObject
from sangfor_af_sdk.Common.BaseResponseObject import BaseResponseObejct
# Interface traffic counters.
class Counter(BaseObject):
    """Read-only view over the "counter" section of an interface payload."""

    def __init__(self, jsonData):
        self.ObjectData = jsonData

    @property
    def RxPackets(self):
        return self.TryGetValue("rx_packets")

    @property
    def RxBytes(self):
        return self.TryGetValue("rx_bytes")

    @property
    def RxDropped(self):
        return self.TryGetValue("rx_dropped")

    @property
    def TxPackets(self):
        return self.TryGetValue("tx_packets")

    @property
    def TxBytes(self):
        return self.TryGetValue("tx_bytes")

    @property
    def TxDropped(self):
        return self.TryGetValue("tx_dropped")
# Interface throughput.
class Speed(BaseObject):
    """Send/receive throughput figures from the "speed" section."""

    def __init__(self, jsonData):
        self.ObjectData = jsonData

    @property
    def Send(self):
        return self.TryGetValue("send")

    @property
    def Recv(self):
        return self.TryGetValue("recv")
# Interface packet send/receive rates.
class BagSpeed(BaseObject):
    """Packet-rate figures from the "bagSpeed" section of the payload."""

    def __init__(self, jsonData):
        self.ObjectData = jsonData

    @property
    def Sendbag(self):
        return self.TryGetValue("sendbag")

    @property
    def Recvbag(self):
        return self.TryGetValue("recvbag")
# Interface information.
class Information(BaseObject):
    """Detail block of one interface: link state, duplex, rates, counters."""

    def __init__(self, jsonData):
        self.ObjectData = jsonData

    @property
    def Duplex(self):
        return self.TryGetValue("duplex")

    @property
    def Duplexspeed(self):
        return self.TryGetValue("duplexSpeed")

    @property
    def Connectstatus(self):
        return self.TryGetValue("connectStatus")

    @property
    def Supportedports(self):
        return self.TryGetValue("supportedPorts")

    @property
    def Bagspeed(self) -> BagSpeed:
        # Wrap the raw sub-dict in its typed accessor.
        return BagSpeed(self.TryGetValue("bagSpeed"))

    @property
    def Speed(self) -> Speed:
        return Speed(self.TryGetValue("speed"))

    @property
    def Counter(self) -> Counter:
        return Counter(self.TryGetValue("counter"))
class InterfaceStatus(BaseObject):
    """Status entry for a single interface: its name plus a detail block."""

    def __init__(self, jsonData):
        self.ObjectData = jsonData

    @property
    def Interfacename(self):
        return self.TryGetValue("interfaceName")

    @property
    def Information(self):
        return Information(self.TryGetValue("information"))
class InterfaceStatusList(BaseResponseObejct):
    """Response wrapper exposing the interface list as typed objects."""

    @property
    def StatusList(self) -> List[InterfaceStatus]:
        # Wrap every raw entry of the response body in an InterfaceStatus.
        return [InterfaceStatus(item) for item in self.BaseData]
|
/sangfor_af_sdk-0.7.tar.gz/sangfor_af_sdk-0.7/sangfor_af_sdk/Object/InterfaceStatus.py
| 0.691706 | 0.170284 |
InterfaceStatus.py
|
pypi
|
import empyrical as ep
import numpy as np
import pandas as pd
from addict import Dict
class Stats:
    """Compute summary statistics for a backtest result.

    Wraps a strategy return series and a benchmark return series and fills
    ``self.stats`` with annualized performance/risk metrics plus a breakdown
    of the deepest drawdown windows.
    """
    def __init__(self, sret, bret):
        """Store strategy (``sret``) and benchmark (``bret``) returns.

        Both inputs are percentage-change (``pct_change``) return series
        indexed by date; the index is normalized to a DatetimeIndex.
        """
        self.sret = sret.copy()
        self.bret = bret.copy()
        self.sret.index = pd.to_datetime(self.sret.index)
        self.bret.index = pd.to_datetime(self.bret.index)
        # addict.Dict allows attribute-style assignment of metrics in run().
        self.stats = Dict()
    @staticmethod
    def _mdd(cum_val):
        """
        cum_val datetimeindex: cumval
        Return the begin and end dates of the maximum drawdown of the
        net-value series, encoded as a single 16-digit int:
        YYYYMMDD (begin) concatenated with YYYYMMDD (end).
        """
        backmax = cum_val.cummax()
        drawdown = cum_val / backmax - 1.0
        end = drawdown.idxmin()
        # Position where the pre-drawdown peak value was first reached.
        begin = cum_val.tolist().index(backmax[end])
        return int(cum_val.index[begin].strftime('%Y%m%d') +
                   end.strftime('%Y%m%d'))
    @classmethod
    # only keep drawdowns deeper than `threshold` (default 2%)
    def _get_max_drawdown_list(cls, cum_val, threshold=2e-2):
        """
        cum_val datetimeindex: cumval
        return
            begin_dt end_dt ratio
        DataFrame of distinct drawdown windows deeper than ``threshold``,
        with begin/end dates, depth (``ratio``) and length in calendar days.
        """
        # Running _mdd over expanding prefixes encodes, for each date, the
        # max-drawdown window seen so far as int(beginYYYYMMDD + endYYYYMMDD).
        df = pd.Series(index=cum_val.index)
        for ind in df.index:
            df.loc[ind] = cls._mdd(cum_val.loc[:ind])
        # The encoding is begin*1e8 + end, so a day-over-day diff >= 1e8 means
        # the *begin* date changed, i.e. a new drawdown window started.
        change_dt = df[df.diff().shift(-1) >= 100000000].index
        max_drowlist = [int(df.loc[m]) for m in change_dt] + [int(df.iloc[-1])]
        max_drowlist = [(str(x)[:8], str(x)[8:],
                         cum_val.loc[str(x)[8:]] / cum_val.loc[str(x)[:8]] - 1)
                        for x in max_drowlist]
        max_drowlist = pd.DataFrame(max_drowlist,
                                    columns=['begin_dt', 'end_dt', 'ratio'])
        max_drowlist = max_drowlist[max_drowlist['ratio'] < -threshold]
        max_drowlist['datelen'] = (pd.to_datetime(max_drowlist['end_dt']) -
                                   pd.to_datetime(max_drowlist['begin_dt'])).dt.days
        return max_drowlist
    def run(self):
        """Populate ``self.stats`` with annualized metrics (via empyrical)."""
        self.stats.annual_return = ep.annual_return(self.sret)
        self.stats.annual_volatility = ep.annual_volatility(self.sret)
        self.stats.excess_return = ep.alpha(self.sret, self.bret)
        self.stats.excess_volatility = ep.annual_volatility(self.sret -
                                                            self.bret)
        self.stats.max_drawdown = ep.max_drawdown(self.sret)
        # excess_sharpe is per-period; sqrt(252) annualizes assuming daily data.
        self.stats.information_ratio = ep.excess_sharpe(
            self.sret, self.bret) * np.sqrt(252)
        self.stats.max_drawdown_list = self._get_max_drawdown_list(
            (self.sret.fillna(0) + 1).cumprod())
        return
|
/sangreal-bt-0.0.28.tar.gz/sangreal-bt-0.0.28/sangreal_bt/stats/stats.py
| 0.44746 | 0.263226 |
stats.py
|
pypi
|
from functools import lru_cache
import pandas as pd
from sangreal_calendar.utils import dt_handle
class CalendarBase:
    """Holds a trading calendar as a pd.Series of '%Y%m%d' date strings."""

    def __init__(self, dates=None) -> None:
        self._dates = dates

    @property
    def dates(self):
        """The injected calendar (None until inject() is called)."""
        return self._dates

    def inject(self, dates):
        """Normalize *dates* (any pd.to_datetime-parsable iterable or Series)
        into a Series of '%Y%m%d' strings and store it.
        """
        series = dates if isinstance(dates, pd.Series) else pd.Series(dates)
        self._dates = pd.to_datetime(series).dt.strftime('%Y%m%d')
        return
CALENDAR = CalendarBase()
def get_trade_dts(begin_dt='19900101', end_dt='20990101'):
    """Return the trade dates between ``begin_dt`` and ``end_dt`` (both
    inclusive) as a pd.Series of '%Y%m%d' strings with a fresh 0-based index.

    Keyword Arguments:
        begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
        end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
    Returns:
        [pd.Series] -- [trade_dts between begin_dt and end_dt]
    """
    begin_dt, end_dt = dt_handle(begin_dt), dt_handle(end_dt)
    dates = CALENDAR.dates.copy()
    dates = dates[(dates >= begin_dt) & (dates <= end_dt)].copy()
    dates.reset_index(drop=True, inplace=True)
    return dates
@lru_cache()
def adjust_trade_dt(date, adjust='last'):
    """Snap ``date`` to a trade date.

    Arguments:
        date {str or datetime} -- date to adjust
    Keyword Arguments:
        adjust {str} -- 'last' for the latest trade date <= date,
            'next' for the earliest trade date >= date (default: {'last'})
    Raises:
        ValueError -- when ``adjust`` is neither 'last' nor 'next'
    Returns:
        [str] -- adjusted trade date in %Y%m%d form
    """
    dates = CALENDAR.dates.copy()
    target = dt_handle(date)
    if adjust == 'last':
        return dates[dates <= target].iloc[-1]
    if adjust == 'next':
        return dates[dates >= target].iloc[0]
    raise ValueError(f"adjust:{adjust} must be 'last' or 'next'!")
@lru_cache()
def step_trade_dt(date, step=1):
    """Move ``step`` trade dates forward (or backward for negative ``step``)
    from ``date``, clipping to the first/last date of the calendar.

    Arguments:
        date {str or datetime} -- [date]
    Keyword Arguments:
        step {int} -- [step] (default: {1})
    Returns:
        [str] -- [date with %Y%m%d type]
    """
    dates = CALENDAR.dates.copy()
    target = dt_handle(date)
    if step >= 0:
        forward = dates[dates >= target]
        try:
            return forward.iloc[step]
        except IndexError:
            # Ran off the end of the calendar: clamp to its last date.
            return dates.iloc[-1]
    backward = dates[dates < target]
    try:
        return backward.iloc[step]
    except IndexError:
        # Ran off the start of the calendar: clamp to its first date.
        return dates.iloc[0]
@lru_cache()
def delta_trade_dt(begin_dt, end_dt):
    """Count the trade dates in [begin_dt, end_dt], inclusive on both ends.

    Arguments:
        begin_dt {str or datetime} -- [begin_dt]
        end_dt {str or datetime} -- [end_dt]
    Returns:
        [int] -- [len of date_range]
    """
    dates = CALENDAR.dates.copy()
    begin_dt, end_dt = dt_handle(begin_dt), dt_handle(end_dt)
    within = (dates >= begin_dt) & (dates <= end_dt)
    return len(dates[within])
# Running the module directly is a deliberate no-op (library module).
if __name__ == "__main__":
    pass
|
/sangreal-calendar-0.0.40.tar.gz/sangreal-calendar-0.0.40/sangreal_calendar/core/trade_dt_handle.py
| 0.627609 | 0.403214 |
trade_dt_handle.py
|
pypi
|
from abc import ABCMeta, abstractmethod
from functools import lru_cache
import pandas as pd
from sangreal_calendar.core.trade_dt_handle import (adjust_trade_dt,
get_trade_dts,
step_trade_dt)
from sangreal_calendar.utils import dt_handle
class RefreshBase(metaclass=ABCMeta):
    """Abstract base class for portfolio-refresh (rebalance) date providers.

    Attributes:
        *args: -1 or 1, int or (1, -1)
            1 selects the first trade date of each period, -1 the last;
            (1, -1) selects both.
    """
    def __init__(self, *args):
        # Valid combinations are (1,), (-1,) or (1, -1); their sums are in
        # {-1, 0, 1}, so any other input is rejected here.
        if abs(sum(args)) > 1:
            raise ValueError('args must be 1 or -1')
        self.args = sorted(args, reverse=True)
    @abstractmethod
    def get(self, begin_dt, end_dt):
        # Subclasses return the pd.Series of refresh dates in [begin_dt, end_dt].
        pass
    @lru_cache()
    def next(self, date, step=1, adjust=True):
        """[get next date, 20180921 -> 20180928(Monthly(-1))]
        Arguments:
            date {[str or datetime]} -- [date]
            adjust {[bool]} -- [if adjust & date is the key day, pass]
            step {[int]} -- [step numbers]
        Returns:
            [str] -- [next day in class frequency]
        """
        # Look ahead ~600 trade dates; assumed wide enough to always contain
        # `step` refresh dates for every supported frequency.
        end_dt = step_trade_dt(date, 600)
        df = self.get(date, end_dt).tolist()
        try:
            if df[0] == date:
                # `date` is itself a refresh date: skip past it when adjust=True.
                if adjust:
                    return df[step]
            return df[step-1]
        except IndexError:
            # Fewer refresh dates than requested: fall back to the last one.
            return df[-1]
    @lru_cache()
    def prev(self, date, step=1, adjust=True):
        """[get previous day, 20180921 -> 20180831(Monthly(-1))]
        Arguments:
            date {[str or datetime]} -- [date]
            adjust {[bool]} -- [if adjust & date is the key day, pass]
            step {[int]} -- [step numbers]
        Returns:
            [str] -- [previous day in class frequency]
        """
        # Mirror image of next(): look back ~600 trade dates.
        begin_dt = step_trade_dt(date, -600)
        df = self.get(begin_dt, date).tolist()
        try:
            if df[-1] == date:
                if adjust:
                    return df[-1-step]
            return df[-step]
        except IndexError:
            return df[0]
    @staticmethod
    def freq_handle(arg, df, step=1):
        # Map a YYYYMM-prefixed tag to a concrete trade date: first trade
        # date of that month for arg == 1, otherwise the last trade date of
        # the month shifted forward by `step`.
        # NOTE(review): `int(x[:6]) + step` assumes the shift stays within the
        # same year (e.g. 201912 + 1 = 201913 is not a real month) — confirm
        # callers only use tags where this cannot overflow.
        if arg == 1:
            tmp_df = df.map(lambda x: adjust_trade_dt(x[:6] + '01', 'next'))
        else:
            tmp_df = df.map(lambda x: step_trade_dt(
                str(int(x[:6]) + step) + '01', -1))
        return tmp_df
    @staticmethod
    def df_handle(begin_dt='19900101', end_dt='20990101', func=None):
        # Map every trade date in the window through `func` (a period-tag
        # function) and de-duplicate, yielding one tag per period.
        begin_dt = dt_handle(begin_dt)
        end_dt = dt_handle(end_dt)
        df = get_trade_dts(begin_dt, end_dt).copy()
        df = df.map(func)
        df.drop_duplicates(inplace=True)
        return df
    def _get(self,
             begin_dt='19900101',
             end_dt='20990101',
             func=None,
             offset=None):
        # Shared implementation for period-based subclasses: `func` maps a
        # Timestamp to a period key and `offset` widens the queried window by
        # enough trade dates that the edge periods are complete.
        begin_dt, end_dt = dt_handle(begin_dt), dt_handle(end_dt)
        df = get_trade_dts(
            step_trade_dt(begin_dt, -1 * offset),
            step_trade_dt(end_dt, offset)).to_frame('trade_dt')
        df['_trade_dt'] = pd.to_datetime(df['trade_dt'])
        # 'month' is the generic period-key column regardless of frequency.
        df['month'] = df['_trade_dt'].map(func)
        all_trade_dt = pd.Series(dtype=object)
        for arg in self.args:
            # arg == 1: first trade date of each period; -1: the last.
            if arg == 1:
                tmp_df = df.drop_duplicates('month', keep='first')['trade_dt']
            else:
                tmp_df = df.drop_duplicates('month', keep='last')['trade_dt']
            all_trade_dt = pd.concat([all_trade_dt, tmp_df])
        all_trade_dt.sort_values(inplace=True)
        # Clip back to the caller's window after the widened query.
        all_trade_dt = all_trade_dt[
            (all_trade_dt >= begin_dt)
            & (all_trade_dt <= end_dt)].drop_duplicates()
        all_trade_dt.reset_index(drop=True, inplace=True)
        return all_trade_dt
class Daily(RefreshBase):
    """Daily refresh: every trade date is a refresh date."""

    def get(self, begin_dt='19900101', end_dt='20990101'):
        """Return the pd.Series of all trade dates in [begin_dt, end_dt].

        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        """
        return get_trade_dts(begin_dt, end_dt).copy()
class Monthly(RefreshBase):
    """Monthly refresh: first and/or last trade date of each month."""

    def get(self, begin_dt='19900101', end_dt='20990101'):
        """Return the pd.Series of monthly refresh dates in [begin_dt, end_dt].

        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        """
        def month_key(ts):
            return f"{ts.year}{ts.month}"

        return self._get(begin_dt=begin_dt, end_dt=end_dt, func=month_key, offset=40)
class Weekly(RefreshBase):
    """Weekly refresh: first and/or last trade date of each ISO week."""

    def get(self, begin_dt='19900101', end_dt='20990101'):
        """Return the pd.Series of weekly refresh dates in [begin_dt, end_dt].

        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        """
        def week_key(ts):
            iso_year, iso_week, _ = ts.isocalendar()
            return f"{iso_year}{iso_week}"

        return self._get(begin_dt=begin_dt, end_dt=end_dt, func=week_key, offset=20)
class BiWeekly(RefreshBase):
    """Bi-weekly refresh: every other weekly refresh date."""

    def get(self, begin_dt='19900101', end_dt='20990101'):
        """Return the pd.Series of bi-weekly refresh dates in [begin_dt, end_dt].

        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        """
        collected = pd.Series()
        for arg in self.args:
            # Take every second weekly refresh date for the requested edge.
            weekly = Weekly(1) if arg == 1 else Weekly(-1)
            collected = pd.concat([collected, weekly.get(begin_dt, end_dt)[::2]])
        collected.sort_values(inplace=True)
        collected.drop_duplicates(inplace=True)
        collected.reset_index(drop=True, inplace=True)
        return collected
class Quarterly(RefreshBase):
    """Quarterly refresh: first and/or last trade date of each quarter."""

    def get(self, begin_dt='19900101', end_dt='20990101'):
        """Return the pd.Series of quarterly refresh dates in [begin_dt, end_dt].

        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        """
        def quarter_key(ts):
            return f"{ts.year}{ts.quarter}"

        return self._get(begin_dt=begin_dt, end_dt=end_dt, func=quarter_key, offset=120)
class Reportly(RefreshBase):
    """Refresh dates aligned with report-disclosure periods.

    NOTE(review): the 04-30 / 08-31 / 10-31 / 12-31 cut-offs look like
    China A-share disclosure deadlines — confirm against the data source.
    """

    @staticmethod
    def _report(x):
        # Map a %Y%m%d date string to its report-period tag (YYYYMM).
        # Dates up to 04-30 still belong to the previous year's annual
        # period ('11'); every date matches one branch, so None is never
        # returned in practice.
        if x <= x[:4] + '0430':
            return str(int(x[:4]) - 1) + '11'
        elif x <= x[:4] + '0831':
            return x[:4] + '05'
        elif x <= x[:4] + '1031':
            return x[:4] + '09'
        elif x <= x[:4] + '1231':
            return x[:4] + '11'

    def get(self, begin_dt='19900101', end_dt='20990101'):
        """[get trade_dt Series with class freq]
        Arguments:
            RefreshBase {[cls]} -- [refreshbase]
        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        Returns:
            [pd.Series] -- [trade_dt Series]
        """
        begin_dt, end_dt = dt_handle(begin_dt), dt_handle(end_dt)
        df = self.df_handle(begin_dt, end_dt, self._report)
        # dtype=object matches RefreshBase._get and avoids the pandas
        # empty-Series default-dtype deprecation warning.
        all_trade_dt = pd.Series(dtype=object)
        for arg in self.args:
            if arg == 1:
                # First trade date of each report period.
                tmp_df = df.map(
                    lambda x: adjust_trade_dt(x[:6] + '01', 'next'))
            else:
                # Last trade date before the next period begins.
                def neg_report(x):
                    if x[-2:] == '11':
                        return step_trade_dt(str(int(x[:4]) + 1) + '0501', -1)
                    elif x[-2:] == '09':
                        return step_trade_dt(x[:4] + '1101', -1)
                    elif x[-2:] == '05':
                        return step_trade_dt(x[:4] + '0901', -1)
                tmp_df = df.map(neg_report)
            all_trade_dt = pd.concat([all_trade_dt, tmp_df])
        all_trade_dt.sort_values(inplace=True)
        all_trade_dt = all_trade_dt[(all_trade_dt >= begin_dt)
                                    & (all_trade_dt <= end_dt)].copy()
        all_trade_dt.reset_index(drop=True, inplace=True)
        # BUG FIX: the original ended with a bare `return`, so get() always
        # returned None despite computing the refresh dates above.
        return all_trade_dt
class Yearly(RefreshBase):
    def get(self, begin_dt='19900101', end_dt='20990101'):
        """Return the yearly refresh trade dates.

        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        Returns:
            [pd.Series] -- [trade_dt Series]
        """
        # Group trading days by calendar year.
        return self._get(
            begin_dt=begin_dt, end_dt=end_dt,
            func=lambda day: str(day.year), offset=300)
class Halfyearly(RefreshBase):
    @staticmethod
    def _year(x):
        # Label a 'YYYYMMDD' date with its calendar half:
        # 'YYYY01' for Jan-Jun, 'YYYY07' for Jul-Dec.
        if x <= x[:4] + '0630':
            return x[:4] + '01'
        if x <= x[:4] + '1231':
            return x[:4] + '07'

    def get(self, begin_dt='19900101', end_dt='20990101'):
        """Return the half-yearly refresh trade dates.

        Keyword Arguments:
            begin_dt {str or datetime} -- [begin_dt] (default: {'19900101'})
            end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
        Returns:
            [pd.Series] -- [trade_dt Series]
        """
        begin_dt, end_dt = dt_handle(begin_dt), dt_handle(end_dt)
        labelled = self.df_handle(begin_dt, end_dt, self._year)
        collected = pd.Series()
        for flag in self.args:
            collected = pd.concat(
                [collected, self.freq_handle(flag, labelled, 6)])
        collected.sort_values(inplace=True)
        in_range = (collected >= begin_dt) & (collected <= end_dt)
        collected = collected[in_range].copy()
        collected.reset_index(drop=True, inplace=True)
        return collected
if __name__ == '__main__':
    # Pure library module: nothing to execute when run as a script.
    pass
|
/sangreal-calendar-0.0.40.tar.gz/sangreal-calendar-0.0.40/sangreal_calendar/core/refresh_rate_handle.py
| 0.648132 | 0.267038 |
refresh_rate_handle.py
|
pypi
|
Loading CSVs into SQL Databases
===============================
When faced with the problem of loading a larger-than-RAM CSV into a SQL
database from within Python, many people will jump to pandas. The workflow goes
something like this:
.. code-block:: python
>>> import sqlalchemy as sa
>>> import pandas as pd
>>> con = sa.create_engine('postgresql://localhost/db')
>>> chunks = pd.read_csv('filename.csv', chunksize=100000)
>>> for chunk in chunks:
   ... chunk.to_sql(name='table', if_exists='append', con=con)
There is an unnecessary and very expensive amount of data conversion going on
here. First we convert our CSV into an iterator of DataFrames, then those
DataFrames are converted into Python data structures compatible with
SQLAlchemy. Those Python objects then need to be serialized in a way that's
compatible with the database they are being sent to. Before you know it, more
time is spent converting data and serializing Python data structures than on
reading data from disk.
Use the technology that has already solved your problem well
------------------------------------------------------------
Loading CSV files into databases is a solved problem. It's a problem that has
been solved well. Instead of rolling our own loader every time we need to do
this and wasting computational resources, we should use the native loaders in
the database of our choosing. Odo lets you do this with a single line of code.
How does odo achieve native database loading speed?
---------------------------------------------------
Odo uses the native CSV loading capabilities of the databases it supports.
These loaders are extremely fast. Odo will beat any other pure Python approach
when loading large datasets. The following is a performance comparison of
loading the entire NYC taxi trip and fare combined dataset (about 33GB of text)
into PostgreSQL, MySQL, and SQLite3 using odo. Our baseline for comparison is
pandas.
**NB:** I'm happy to hear about other optimizations that I may not be taking
advantage of.
Timings
-------
CSV → PostgreSQL (22m 46s)
``````````````````````````
* READS: ~50 MB/s
* WRITES: ~50 MB/s
The ``COPY`` command built into postgresql is quite fast. Odo generates code
for the ``COPY`` command using a custom SQLAlchemy expression.
.. code-block:: python
In [1]: %time t = odo('all.csv', 'postgresql://localhost::nyc')
CPU times: user 1.43 s, sys: 330 ms, total: 1.76 s
Wall time: 22min 46s
PostgreSQL → CSV (21m 32s)
``````````````````````````
Getting data out of the database takes roughly the same amount of time as
loading it in.
``pg_bulkload`` Command Line Utility (13m 17s)
``````````````````````````````````````````````
* READS: ~50 MB/s
* WRITES: ~50 MB/s
A special command line tool called ``pg_bulkload`` exists solely for the
purpose of loading files into a postgresql table. It achieves its speedups by
disabling WAL (write ahead logging) and buffering. Odo doesn't use this (yet)
because the installation requires several steps. There are also implications
for data integrity when turning off WAL.
.. code-block:: sh
$ time ./pg_bulkload nyc2.ctl < all.csv
NOTICE: BULK LOAD START
NOTICE: BULK LOAD END
1 Rows skipped.
173179759 Rows successfully loaded.
0 Rows not loaded due to parse errors.
0 Rows not loaded due to duplicate errors.
0 Rows replaced with new rows.
./pg_bulkload nyc2.ctl < all.csv 26.14s user 33.31s system 7% cpu 13:17.31 total
CSV → MySQL (20m 49s)
`````````````````````
.. code-block:: python
In [1]: %time t = odo('all.csv', 'mysql+pymysql://localhost/test::nyc')
CPU times: user 1.32 s, sys: 304 ms, total: 1.63 s
Wall time: 20min 49s
* READS: ~30 MB/s
* WRITES: ~150 MB/s
MySQL → CSV (17m 47s)
`````````````````````
.. code-block:: python
In [1]: %time csv = odo('mysql+pymysql://localhost/test::nyc', 'nyc.csv')
CPU times: user 1.03 s, sys: 259 ms, total: 1.29 s
Wall time: 17min 47s
* READS: ~30 MB/s
* WRITES: ~30 MB/s
Similar to PostgreSQL, MySQL takes roughly the same amount of time to write a
CSV as it does to load it into a table.
CSV → SQLite3 (57m 31s\*)
`````````````````````````
.. code-block:: python
In [1]: dshape = discover(resource('all.csv'))
In [2]: %time t = odo('all.no.header.csv', 'sqlite:///db.db::nyc',
...: dshape=dshape)
CPU times: user 3.09 s, sys: 819 ms, total: 3.91 s
Wall time: 57min 31s
\* Here, we call ``discover`` on a version of the dataset that has the header
in the first line and we use a version of the dataset *without* the header line
in the sqlite3 ``.import`` command. This is sort of cheating, but I wanted to
see what the loading time of sqlite3's import command was without the overhead
of creating a new file without the header line.
SQLite3 → CSV (46m 43s)
```````````````````````
* READS: ~15 MB/s
* WRITES: ~13 MB/s
.. code-block:: python
In [1]: %time t = odo('sqlite:///db.db::nyc', 'nyc.csv')
CPU times: user 2.7 s, sys: 841 ms, total: 3.55 s
Wall time: 46min 43s
Pandas
``````
* READS: ~60 MB/s
* WRITES: ~3-5 MB/s
I didn't actually finish this timing because a single iteration of inserting
1,000,000 rows took about 4 minutes and there would be 174 such iterations
bringing the total loading time to:
.. code-block:: python
>>> 175 * 4 / 60.0 # doctest: +ELLIPSIS
11.66...
11.66 **hours**!
Nearly *12* hours to insert 175 million rows into a postgresql database. The
next slowest database (SQLite) is still **11x** faster than reading your CSV
file into pandas and then sending that ``DataFrame`` to PostgreSQL with the
``to_sql`` method.
Final Thoughts
``````````````
For getting CSV files into the major open source databases from within Python,
nothing is faster than odo since it takes advantage of the capabilities of the
underlying database.
Don't use pandas for loading CSV files into a database.
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/docs/source/perf.rst
| 0.590307 | 0.847463 |
perf.rst
|
pypi
|
Adding a new Backend
====================
Q: *How do I add new nodes to the odo graph?*
Extend Functions
----------------
We extend Odo by implementing a few functions for each new type
* ``discover`` - Return the DataShape_ of an object
* ``convert`` - Convert data to new type
* ``append`` - Append data on to existing data source
* ``resource`` - Identify data by a string URI
We extend each of these by writing new small functions that we decorate with
types. Odo will then pick these up, integrate them in to the network, and use
them when appropriate.
Discover
--------
Discover returns the DataShape_ of an object. Datashape is a potentially
nested combination of shape and datatype. It helps us to migrate metadata
consistently as we migrate the data itself. This enables us to emerge with the
right dtypes even if we have to transform through potentially lossy formats.
Example
```````
.. code-block:: python
>>> discover([1, 2, 3])
dshape("3 * int32")
>>> import numpy as np
>>> x = np.empty(shape=(3, 5), dtype=[('name', 'O'), ('balance', 'f8')])
>>> discover(x)
dshape("3 * 5 * {name: string, balance: float64}")
Extend
``````
We import ``discover`` from the ``datashape`` library and extend it with a
type.
.. code-block:: python
from datashape import discover, from_numpy
@discover(pd.DataFrame)
def discover_dataframe(df, **kwargs):
shape = (len(df),)
dtype = df.values.dtype
return from_numpy(shape, dtype)
In this simple example we rely on convenience functions within datashape to
form a datashape from a numpy shape and dtype. For more complex situations
(e.g. databases) it may be necessary to construct datashapes manually.
Convert
-------
Convert copies your data in to a new object with a different type.
Example
```````
.. code-block:: python
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> convert(list, x)
[0, 1, 2, 3, 4]
>>> import pandas as pd
>>> convert(pd.Series, x)
0 0
1 1
2 2
3 3
4 4
dtype: int64
Extend
``````
Import convert from ``odo`` and register it with two types, one for the target
and one for the source
.. code-block:: python
from odo import convert
@convert.register(list, np.ndarray)
def array_to_list(x, **kwargs):
return x.tolist()
@convert.register(pd.Series, np.ndarray)
def array_to_series(x, **kwargs):
return pd.Series(x)
Append
------
Append copies your data in to an existing dataset.
Example
```````
.. code-block:: python
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> L = [10, 20, 30]
>>> _ = append(L, x)
>>> L
[10, 20, 30, 0, 1, 2, 3, 4]
Extend
``````
Import append from ``odo`` and register it with two types, one for the target
and one for the source. Usually we teach ``odo`` how to append from one
preferred type and then use convert for all others
.. code-block:: python
from odo import append
@append.register(list, list)
def append_list_to_list(tgt, src, **kwargs):
tgt.extend(src)
return tgt
@append.register(list, object) # anything else
def append_anything_to_list(tgt, src, **kwargs):
source_as_list = convert(list, src, **kwargs)
return append(tgt, source_as_list, **kwargs)
Resource
--------
Resource creates objects from string URIs matched against regular expressions.
Example
```````
.. code-block:: python
>>> resource('myfile.hdf5')
<HDF5 file "myfile.hdf5" (mode r+)>
>>> resource('myfile.hdf5::/data', dshape='10 * 10 * int32')
<HDF5 dataset "data": shape (10, 10), type "<i4">
The objects it returns are ``h5py.File`` and ``h5py.Dataset`` respectively. In
the second case resource found that the dataset did not exist so it created it.
Extend
``````
We import ``resource`` from ``odo`` and register it with regular expressions
.. code-block:: python
from odo import resource
import h5py
@resource.register('.*\.hdf5')
   def resource_hdf5(uri, **kwargs):
       return h5py.File(uri)
General Notes
-------------
We pass all keyword arguments from the top-level call to ``odo`` to *all*
functions. This allows special keyword arguments to trickle down to the right
place, e.g. ``delimiter=';'`` makes it to the ``pd.read_csv`` call when
interacting with CSV files, but also means that all functions that you write
must expect and handle unwanted keyword arguments. This often requires some
filtering on your part.
Even though all four of our abstract functions have a ``.register`` method they
operate in very different ways. Convert is managed by networkx and path
finding, ``append`` and ``discover`` are managed by multipledispatch_, and
``resource`` is managed by regular expressions.
Examples are useful. You may want to look at some of the ``odo`` source for
simple backends for help
https://github.com/blaze/odo/tree/master/odo/backends
.. _DataShape : datashape.html
.. _multipledispatch: http://github.com/mrocklin/multipledispatch
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/docs/source/add-new-backend.rst
| 0.879587 | 0.913252 |
add-new-backend.rst
|
pypi
|
from .into import into
def odo(source, target, **kwargs):
    """ Push one dataset into another

    Parameters
    ----------
    source : object or string
        Where the data comes from: a Python object (e.g. a DataFrame) or a
        string URI (e.g. 'filename.csv').
    target : object or string or type
        Where the data should go: an object (e.g. []), a type (e.g. list),
        or a string URI (e.g. 'postgresql://hostname::tablename').
    raise_on_errors : bool (optional, defaults to False)
        Raise exceptions rather than reroute around them.
    **kwargs :
        Passed through to the underlying conversion functions; for example
        ``delimiter=';'`` reaches ``pandas.read_csv`` when converting CSVs.
        See the per-format pages (AWS, CSV, JSON, HDF5, HDFS, Hive, SAS,
        SQL, SSH, Mongo, Spark) at http://odo.pydata.org/en/latest/ for the
        accepted keywords.

    Examples
    --------
    >>> L = odo((1, 2, 3), list)  # Convert things into new things
    >>> L
    [1, 2, 3]

    >>> _ = odo((4, 5, 6), L)  # Append things onto existing things
    >>> L
    [1, 2, 3, 4, 5, 6]

    >>> odo([('Alice', 1), ('Bob', 2)], 'myfile.csv')  # doctest: +SKIP

    String URIs are matched by regular expression and resolved through the
    ``resource`` function.  Datatypes may be pinned explicitly with the
    ``dshape=`` keyword when the data alone is ambiguous; see ``discover``
    for more on datashapes:

    >>> ds = 'var * {name: string, balance: float64}'
    >>> odo([('Alice', 100), ('Bob', 200)], 'accounts.json', dshape=ds)  # doctest: +SKIP

    See Also
    --------
    odo.resource.resource - Specify things with strings
    datashape.discover - Get datashape of data
    odo.convert.convert - Convert things into new things
    odo.append.append - Add things onto existing things
    """
    # ``into`` takes (target, source); odo merely flips the argument order.
    return into(target, source, **kwargs)
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/odo.py
| 0.832543 | 0.44083 |
odo.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import functools
from toolz import merge
from multipledispatch import Dispatcher
from .convert import convert
from .append import append
from .resource import resource
from .utils import ignoring
import datashape
from datashape import discover
from datashape.dispatch import namespace
from datashape.predicates import isdimension
from .compatibility import unicode
from pandas import DataFrame, Series
from numpy import ndarray
# In-place append on these types either is impossible (tuple/ndarray) or
# would silently copy instead of mutating; ``into`` raises for them.
not_appendable_types = DataFrame, Series, ndarray, tuple
__all__ = 'into',
# Keep one shared ``into`` dispatcher in the datashape namespace so that
# every library importing this module extends the same instance.
if 'into' not in namespace:
    namespace['into'] = Dispatcher('into')
into = namespace['into']
def validate(f):
    """Decorator normalizing the ``dshape`` keyword argument.

    A string dshape is parsed with ``datashape.dshape``; anything else that
    is neither None nor a ``DataShape`` raises TypeError.  The normalized
    value is always forwarded as ``dshape=`` to the wrapped function.
    """
    @functools.wraps(f)
    def inner(*args, **kwargs):
        ds = kwargs.pop('dshape', None)
        if isinstance(ds, (str, unicode)):
            ds = datashape.dshape(ds)
        if ds is not None and not isinstance(ds, datashape.DataShape):
            raise TypeError('dshape argument is not an instance of DataShape')
        kwargs['dshape'] = ds
        return f(*args, **kwargs)
    return inner
@into.register(type, object)
@validate
def into_type(a, b, dshape=None, **kwargs):
    """Convert ``b`` into a brand-new object of type ``a``.

    When no dshape is given we try ``discover``; a NotImplementedError
    there simply leaves ``dshape=None`` for ``convert`` to cope with.
    """
    if dshape is None:
        with ignoring(NotImplementedError):
            dshape = discover(b)
    return convert(a, b, dshape=dshape, **kwargs)
@into.register(object, object)
@validate
def into_object(target, source, dshape=None, **kwargs):
    """ Push one dataset into another

    ``source`` may be a Python object or a string URI (resolved through
    ``resource``); ``target`` is an existing object that is appended to in
    place.  Keyword arguments trickle down to the conversion functions,
    e.g. ``delimiter=';'`` reaches ``pandas.read_csv``.  Datatypes may be
    pinned with the ``dshape=`` keyword; see ``discover`` for details.

    >>> L = into([], (1, 2, 3))  # doctest: +SKIP
    >>> L  # doctest: +SKIP
    [1, 2, 3]

    See Also
    --------
    into.resource.resource - Specify things with strings
    datashape.discover - Get datashape of data
    into.convert.convert - Convert things into new things
    into.append.append - Add things onto existing things
    """
    if isinstance(source, (str, unicode)):
        source = resource(source, dshape=dshape, **kwargs)
    # Refuse targets whose "append" could not be observed by the caller.
    if type(target) in not_appendable_types:
        raise TypeError('target of %s type does not support in-place append' % type(target))
    if dshape is None:
        with ignoring(NotImplementedError):
            dshape = discover(source)
    return append(target, source, dshape=dshape, **kwargs)
@into.register((str, unicode), object)
@validate
def into_string(uri, b, dshape=None, **kwargs):
    """Create the resource named by ``uri`` and push ``b`` into it."""
    if dshape is None:
        dshape = discover(b)
    # For dimensioned data ask resource for an *empty* container of the
    # right record type; the rows arrive via the subsequent append.
    if isdimension(dshape[0]):
        resource_ds = 0 * dshape.subshape[0]
    else:
        resource_ds = dshape
    created = resource(uri, dshape=resource_ds, expected_dshape=dshape,
                       **kwargs)
    return into(created, b, dshape=dshape, **kwargs)
@into.register((type, (str, unicode)), (str, unicode))
@validate
def into_string_string(a, b, **kwargs):
    """Resolve a string source through ``resource`` then re-dispatch."""
    source = resource(b, **kwargs)
    return into(a, source, **kwargs)
@into.register(object)
@validate
def into_curried(o, **kwargs1):
    """Single-argument form: return ``into`` partially applied to ``o``.

    Keywords given now (kwargs1) win over keywords given at call time.
    """
    def curried_into(other, **kwargs2):
        combined = merge(kwargs2, kwargs1)
        return into(o, other, **combined)
    return curried_into
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/into.py
| 0.751283 | 0.309063 |
into.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from .regex import RegexDispatcher
# Bug fix: ``__all__`` must be a sequence of names.  The previous bare
# string 'resource' would be iterated character-by-character by
# ``from ... import *`` (looking for attributes 'r', 'e', 's', ...).
# The trailing comma makes this a 1-tuple, matching into.py's style.
__all__ = 'resource',
resource = RegexDispatcher('resource')
@resource.register('.*', priority=1)
def resource_all(uri, *args, **kwargs):
    """ Refer to data by strings

    Translate a string URI into a Python object pointing at that data.

    Filenames of common formats are valid URIs::

        myfile.csv             CSV file
        myfile.txt             text file
        myfile.json            JSON and line-delimited JSON
        myfile.*.csv           collection of files
        myfile.hdf5::/path     file plus an internal datapath
        sqlite://my.db::tbl    protocol, file and tablename

    Protocol prefixes (``json://``, ``jsonlines://``, ``hdfstore://``, ...)
    disambiguate formats that share an extension.  Database connection
    strings such as ``postgresql://user:pass@hostname/db::tablename`` are
    also supported; when possible ``resource`` returns an object from
    another popular library (e.g. a ``sqlalchemy.Table``).

    URIs are matched by regular expression -- inspect ``resource.funcs`` to
    see what your installation currently supports.  New datasets can be
    created by providing a datashape, for example
    ``resource('myfile.hdf5::/data', dshape='1000 * 1000 * float32')``;
    see ``discover`` to learn more about datashapes.

    The main ``odo`` function resolves string URIs through ``resource``:
    ``odo('some-source', target)`` is shorthand for
    ``odo(resource('some-source'), target)``.

    See Also
    --------
    odo
    discover
    """
    # Lowest-priority fallback: reached only when no other pattern matched.
    raise NotImplementedError("Unable to parse uri to data resource: " + uri)
@resource.register('.+::.+', priority=15)
def resource_split(uri, *args, **kwargs):
    """Split ``file::datapath`` URIs and re-dispatch on the file part.

    Splits on the *last* '::' so nested separators stay with the file part.
    """
    head, _, tail = uri.rpartition('::')
    return resource(head, tail, *args, **kwargs)
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/resource.py
| 0.830491 | 0.227759 |
resource.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from collections import namedtuple, Iterator
from contextlib import contextmanager
from warnings import warn
from datashape import discover
import networkx as nx
import numpy as np
from toolz import concatv
from .compatibility import map, adjacency
from .utils import expand_tuples, ignoring
ooc_types = set()  # Out-of-Core types; backends register larger-than-memory types here
class FailedConversionWarning(UserWarning):
    """Warning emitted when one edge of a conversion path fails.

    Attributes
    ----------
    src, dest : type
        Endpoint types of the conversion edge that raised.
    exc : Exception
        The exception the conversion function raised.
    """

    def __init__(self, src, dest, exc):
        self.src = src
        self.dest = dest
        self.exc = exc

    def __str__(self):
        template = 'Failed on %s -> %s. Working around\nError message:\n%s'
        return template % (self.src.__name__, self.dest.__name__, self.exc)
class IterProxy(object):
    """An proxy to another iterator to support swapping the underlying stream
    mid-iteration.

    Parameters
    ----------
    it : iterable
        The iterable to proxy.

    Attributes
    ----------
    it : iterable
        The iterable being proxied.  Assigning to it swaps the underlying
        stream; the assigned value is passed through ``iter`` first.
    """

    def __init__(self, it):
        self._it = iter(it)

    def __iter__(self):
        # The proxy itself is the iterator.
        return self

    def __next__(self):
        return next(self.it)

    next = __next__  # py2 compat

    @property
    def it(self):
        return self._it

    @it.setter
    def it(self, value):
        self._it = iter(value)
class NetworkDispatcher(object):
    """Dispatch conversions along a weighted digraph of types.

    Nodes are types; each edge carries a conversion ``func`` and a ``cost``
    used as the shortest-path weight.
    """

    def __init__(self, name):
        self.name = name
        self.graph = nx.DiGraph()

    def register(self, a, b, cost=1.0):
        """Decorator: register ``func`` as converting type ``b`` -> ``a``."""
        pairs = expand_tuples([a, b])

        def decorator(func):
            for dest, src in pairs:
                # Edges point source -> destination of the conversion.
                self.graph.add_edge(src, dest, cost=cost, func=func)
            return func
        return decorator

    def path(self, *args, **kwargs):
        return path(self.graph, *args, **kwargs)

    def __call__(self, *args, **kwargs):
        return _transform(self.graph, *args, **kwargs)
def _transform(graph, target, source, excluded_edges=None, ooc_types=ooc_types,
               **kwargs):
    """ Transform source to target type using graph of transformations

    Walks the cheapest conversion path from ``type(source)`` to ``target``.
    When a step raises NotImplementedError the edge is excluded and we
    continue along whichever is cheaper: a fresh path from the original
    source, or a path onward from the current intermediate value.
    Pass ``raise_on_errors=True`` to propagate failures instead.
    """
    # take a copy so we can mutate without affecting the input
    excluded_edges = (excluded_edges.copy()
                      if excluded_edges is not None else
                      set())
    with ignoring(NotImplementedError):
        if 'dshape' not in kwargs or kwargs['dshape'] is None:
            kwargs['dshape'] = discover(source)
    pth = path(graph, type(source), target,
               excluded_edges=excluded_edges,
               ooc_types=ooc_types)
    x = source
    # IterProxy lets us swap the remaining steps mid-iteration on failure.
    path_proxy = IterProxy(pth)
    for convert_from, convert_to, f, cost in path_proxy:
        try:
            x = f(x, excluded_edges=excluded_edges, **kwargs)
        except NotImplementedError as e:
            if kwargs.get('raise_on_errors'):
                raise
            warn(FailedConversionWarning(convert_from, convert_to, e))
            # exclude the broken edge
            excluded_edges |= {(convert_from, convert_to)}
            # compute the path from `source` to `target` excluding
            # the edge that broke
            fresh_path = list(path(graph, type(source), target,
                                   excluded_edges=excluded_edges,
                                   ooc_types=ooc_types))
            fresh_path_cost = path_cost(fresh_path)
            # compute the path from the current `convert_from` type
            # to the `target`
            try:
                greedy_path = list(path(graph, convert_from, target,
                                        excluded_edges=excluded_edges,
                                        ooc_types=ooc_types))
            except nx.exception.NetworkXNoPath:
                greedy_path_cost = np.inf
            else:
                greedy_path_cost = path_cost(greedy_path)
            if fresh_path_cost < greedy_path_cost:
                # it is faster to start over from `source` with a new path
                x = source
                pth = fresh_path
            else:
                # it is faster to work around our broken edge from our
                # current location
                pth = greedy_path
            path_proxy.it = pth
    return x
# One conversion step: ``func`` converts convert_from -> convert_to at ``cost``.
PathPart = namedtuple('PathPart', 'convert_from convert_to func cost')
# Abstract bases a type may satisfy without them appearing in its mro().
_virtual_superclasses = (Iterator,)
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
    """ Path of functions between two types

    Returns an iterator of PathPart steps along the cheapest route (edge
    weight 'cost') from ``source`` type to ``target`` type.  Propagates
    ``nx.NetworkXNoPath`` when no route exists.
    """
    if not isinstance(source, type):
        source = type(source)
    if not isinstance(target, type):
        target = type(target)
    # Walk the mro (plus virtual superclasses such as Iterator) until we
    # find a class the graph actually knows; start the search from there.
    for cls in concatv(source.mro(), _virtual_superclasses):
        if cls in graph:
            source = cls
            break
    # If both source and target are Out-Of-Core types then restrict ourselves
    # to the graph of out-of-core types
    if ooc_types:
        oocs = tuple(ooc_types)
        if issubclass(source, oocs) and issubclass(target, oocs):
            graph = graph.subgraph([n for n in graph.nodes()
                                    if issubclass(n, oocs)])
    with without_edges(graph, excluded_edges) as g:
        pth = nx.shortest_path(g, source=source, target=target, weight='cost')
        edge = adjacency(graph)
        def path_part(src, tgt):
            # Look up the registered conversion function/cost for this hop.
            node = edge[src][tgt]
            return PathPart(src, tgt, node['func'], node['cost'])
        return map(path_part, pth, pth[1:])
def path_cost(path):
    """Total cost of a conversion path (sum of the per-edge costs)."""
    total = 0
    for part in path:
        total += part.cost
    return total
@contextmanager
def without_edges(g, edges):
    """Temporarily remove ``edges`` from graph ``g``.

    NOTE: mutates ``g`` in place while the context is open; the removed
    edges (with their attributes) are restored on exit even if the body
    raises.
    """
    edges = edges or []
    held = dict()
    _g_edge = adjacency(g)
    for a, b in edges:
        # Remember the edge attributes so the edge can be rebuilt intact.
        held[(a, b)] = _g_edge[a][b]
        g.remove_edge(a, b)
    try:
        yield g
    finally:
        # Restore the held edges together with their original attributes.
        for (a, b), kwargs in held.items():
            g.add_edge(a, b, **kwargs)
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/core.py
| 0.860896 | 0.205376 |
core.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import re
# Type of a compiled regex (re.Pattern on Python 3; kept dynamic for py2).
_pattern_type = type(re.compile(''))


def normalize(r):
    """Normalize a regular expression by ensuring that it is wrapped with:
    '^' and '$'

    Parameters
    ----------
    r : str or Pattern
        The pattern to normalize.

    Returns
    -------
    p : Pattern
        The compiled regex.
    """
    pattern = r.pattern if isinstance(r, _pattern_type) else r
    return re.compile('^%s$' % pattern.lstrip('^').rstrip('$'))
class RegexDispatcher(object):
    r"""
    Regular Expression Dispatcher

    >>> f = RegexDispatcher('f')

    >>> @f.register(r'\d*')
    ... def parse_int(s):
    ...     return int(s)

    >>> @f.register(r'\d*\.\d*')
    ... def parse_float(s):
    ...     return float(s)

    Set priorities to break ties between multiple matches.
    Default priority is set to 10

    >>> @f.register(r'\w*', priority=9)
    ... def parse_str(s):
    ...     return s

    >>> f('123')
    123
    >>> f('123.456')
    123.456
    """
    def __init__(self, name):
        self.name = name
        self.funcs = {}        # normalized pattern -> handler
        self.priorities = {}   # handler -> priority (highest wins)

    def add(self, regex, func, priority=10):
        """Register ``func`` for strings matching ``regex`` (normalized)."""
        self.funcs[normalize(regex)] = func
        self.priorities[func] = priority

    def register(self, regex, priority=10):
        """Register a new handler in this regex dispatcher.

        Parameters
        ----------
        regex : str or Pattern
            The pattern to match against.
        priority : int, optional
            The priority for this pattern. This is used to resolve ambiguous
            matches. The highest priority match wins.

        Returns
        -------
        decorator : callable
            A decorator that registers the function with this RegexDispatcher
            but otherwise returns the function unchanged.
        """
        def _(func):
            self.add(regex, func, priority)
            return func
        return _

    def dispatch(self, s):
        """Return the highest-priority handler whose pattern matches ``s``.

        Raises ValueError (from ``max`` on an empty sequence) when nothing
        matches.
        """
        funcs = (func for r, func in self.funcs.items() if r.match(s))
        return max(funcs, key=self.priorities.get)

    def __call__(self, s, *args, **kwargs):
        return self.dispatch(s)(s, *args, **kwargs)

    @property
    def __doc__(self):
        # take the min to give the docstring of the last fallback function
        return min(self.priorities.items(), key=lambda x: x[1])[0].__doc__
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/regex.py
| 0.899554 | 0.275903 |
regex.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from datashape import discover
from datashape.dispatch import dispatch
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks
from ..utils import tmpfile
import os
import numpy as np
import tables
from toolz import first
import datashape
import shutil
__all__ = ['PyTables']
@discover.register((tables.Array, tables.Table))
def discover_tables_node(n):
    # Leaf datasets: on-disk shape/dtype map directly to a datashape.
    return datashape.from_numpy(n.shape, n.dtype)
# NOTE(review): the three functions below all reuse the same name.  This is
# harmless for dispatch (the decorator captures each function object when it
# runs) but each def shadows the previous module-level binding.
@discover.register(tables.Node)
def discover_tables_node(n):
    # Group nodes: recurse into the children mapping.
    return discover(n._v_children)  # subclasses dict
@discover.register(tables.File)
def discover_tables_node(f):
    # NOTE(review): File.getNode is the legacy PyTables 2.x spelling; 3.x
    # renamed it to get_node -- confirm which PyTables versions are supported.
    return discover(f.getNode('/'))
@append.register((tables.Array, tables.Table), np.ndarray)
def numpy_to_pytables(t, x, **kwargs):
    # Append the ndarray rows onto the PyTables node in place.
    t.append(x)
    # NOTE(review): returns the source ``x`` rather than the target ``t``;
    # other append implementations return the target -- confirm intent.
    return x
@append.register((tables.Array, tables.Table), object)
def append_h5py(dset, x, **kwargs):
    # Generic fallback: convert any source to chunked ndarrays, then append
    # chunk by chunk via the registration above.
    # NOTE(review): despite the name this handles PyTables nodes, not h5py;
    # the name was presumably copied from the h5py backend.
    return append(dset, convert(chunks(np.ndarray), x, **kwargs), **kwargs)
@convert.register(np.ndarray, tables.Table, cost=3.0)
def pytables_to_numpy(t, **kwargs):
    # Materialize the whole table in memory as a structured ndarray.
    return t[:]
@convert.register(chunks(np.ndarray), tables.Table, cost=3.0)
def pytables_to_numpy_chunks(t, chunksize=2**20, **kwargs):
def load():
for i in range(0, t.shape[0], chunksize):
yield t[i: i + chunksize]
return chunks(np.ndarray)(load)
def dtype_to_pytables(dtype):
""" Convert NumPy dtype to PyTable descriptor
Examples
--------
>>> from tables import Int32Col, StringCol, Time64Col
>>> dt = np.dtype([('name', 'S7'), ('amount', 'i4'), ('time', 'M8[us]')])
>>> dtype_to_pytables(dt) # doctest: +SKIP
{'amount': Int32Col(shape=(), dflt=0, pos=1),
'name': StringCol(itemsize=7, shape=(), dflt='', pos=0),
'time': Time64Col(shape=(), dflt=0.0, pos=2)}
"""
d = {}
for pos, name in enumerate(dtype.names):
dt, _ = dtype.fields[name]
if issubclass(dt.type, np.datetime64):
tdtype = tables.Description({name: tables.Time64Col(pos=pos)}),
else:
tdtype = tables.descr_from_dtype(np.dtype([(name, dt)]))
el = first(tdtype)
getattr(el, name)._v_pos = pos
d.update(el._v_colobjects)
return d
def PyTables(path, datapath, dshape=None, **kwargs):
"""Create or open a ``tables.Table`` object.
Parameters
----------
path : str
Path to a PyTables HDF5 file.
datapath : str
The name of the node in the ``tables.File``.
dshape : str or datashape.DataShape
DataShape to use to create the ``Table``.
Returns
-------
t : tables.Table
Examples
--------
>>> from odo.utils import tmpfile
>>> # create from scratch
>>> with tmpfile('.h5') as f:
... t = PyTables(filename, '/bar',
... dshape='var * {volume: float64, planet: string[10, "A"]}')
... data = [(100.3, 'mars'), (100.42, 'jupyter')]
... t.append(data)
... t[:] # doctest: +SKIP
...
array([(100.3, b'mars'), (100.42, b'jupyter')],
dtype=[('volume', '<f8'), ('planet', 'S10')])
"""
def possibly_create_table(filename, dtype):
f = tables.open_file(filename, mode='a')
try:
if datapath not in f:
if dtype is None:
raise ValueError('dshape cannot be None and datapath not'
' in file')
else:
f.create_table('/', datapath.lstrip('/'), description=dtype)
finally:
f.close()
if dshape:
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if dshape[0] == datashape.var:
dshape = dshape.subshape[0]
dtype = dtype_to_pytables(datashape.to_numpy_dtype(dshape))
else:
dtype = None
if os.path.exists(path):
possibly_create_table(path, dtype)
else:
with tmpfile('.h5') as filename:
possibly_create_table(filename, dtype)
shutil.copyfile(filename, path)
return tables.open_file(path, mode='a').get_node(datapath)
@resource.register('pytables://.+', priority=11)
def resource_pytables(path, datapath, **kwargs):
    """Resolve a ``pytables://`` URI to a PyTables node."""
    prefix = 'pytables://'
    return PyTables(path[len(prefix):], datapath, **kwargs)
@dispatch((tables.Table, tables.Array))
def drop(t):
t.remove()
@dispatch(tables.File)
def drop(f):
f.close()
os.remove(f.filename)
ooc_types |= set((tables.Table, tables.Array))
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/backends/pytables.py
| 0.536799 | 0.310342 |
pytables.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import datashape
from datashape import discover
from ..append import append
from ..convert import convert, ooc_types
from ..chunks import chunks
from ..resource import resource
from ..utils import filter_kwargs
@discover.register(pd.HDFStore)
def discover_hdfstore(f):
d = dict()
for key in f.keys():
d2 = d
key2 = key.lstrip('/')
while '/' in key2:
group, key2 = key2.split('/', 1)
if group not in d2:
d2[group] = dict()
d2 = d2[group]
d2[key2] = f.get_storer(key)
return discover(d)
@discover.register(pd.io.pytables.Fixed)
def discover_hdfstore_storer(storer):
f = storer.parent
n = storer.shape
if isinstance(n, list):
n = n[0]
measure = discover(f.select(storer.pathname, start=0, stop=10)).measure
return n * measure
@convert.register(chunks(pd.DataFrame), pd.io.pytables.AppendableFrameTable)
def hdfstore_to_chunks_dataframes(data, chunksize=100000, **kwargs):
if (isinstance(chunksize, (float, np.floating)) and
not chunksize.is_integer()):
raise TypeError('chunksize argument must be an integer, got %s' %
chunksize)
chunksize = int(chunksize)
def f():
k = min(chunksize, 100)
yield data.parent.select(data.pathname, start=0, stop=k)
for chunk in data.parent.select(data.pathname, chunksize=chunksize,
start=k):
yield chunk
return chunks(pd.DataFrame)(f)
@convert.register(pd.DataFrame, (pd.io.pytables.AppendableFrameTable,
pd.io.pytables.FrameFixed))
def hdfstore_to_chunks_dataframes(data, **kwargs):
return data.read()
pytables_h5py_explanation = """
You've run in to a conflict between the two HDF5 libraries in Python,
H5Py and PyTables. You're trying to do something that requires PyTables but
H5Py was loaded first and the two libraries don't share well.
To resolve this you'll have to restart your Python process and ensure that you
import tables
before you import projects like odo or into or blaze."""
from collections import namedtuple
EmptyHDFStoreDataset = namedtuple('EmptyHDFStoreDataset', 'parent,pathname,dshape')
@resource.register('hdfstore://.+', priority=11)
def resource_hdfstore(uri, datapath=None, dshape=None, **kwargs):
    """Open (or prepare to create) a dataset inside a pandas HDFStore."""
    # TODO:
    # 1. Support nested datashapes (e.g. groups)
    # 2. Try translating unicode to ascii? (PyTables fails here)
    fn = uri.split('://')[1]
    try:
        f = pd.HDFStore(fn, **filter_kwargs(pd.HDFStore, kwargs))
    except RuntimeError as e:
        # Re-raise with a hint about the h5py/PyTables import-order conflict.
        raise type(e)(pytables_h5py_explanation)
    if dshape is None:
        return f.get_storer(datapath) if datapath else f
    dshape = datashape.dshape(dshape)
    # Already exists, return it
    if datapath in f:
        return f.get_storer(datapath)
    # Need to create new dataset.
    # HDFStore doesn't support empty datasets, so we use a proxy object.
    return EmptyHDFStoreDataset(f, datapath, dshape)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), pd.DataFrame)
def append_dataframe_to_hdfstore(store, df, **kwargs):
store.parent.append(store.pathname, df, append=True)
return store.parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset),
chunks(pd.DataFrame))
def append_chunks_dataframe_to_hdfstore(store, c, **kwargs):
parent = store.parent
for chunk in c:
parent.append(store.pathname, chunk)
return parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), object)
def append_object_to_hdfstore(store, o, **kwargs):
return append(store, convert(chunks(pd.DataFrame), o, **kwargs), **kwargs)
ooc_types.add(pd.io.pytables.AppendableFrameTable)
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/backends/hdfstore.py
| 0.440951 | 0.248181 |
hdfstore.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import os
from bcolz import ctable, carray
import numpy as np
from toolz import keyfilter
import datashape
from datashape import discover
import shutil
from ..numpy_dtype import dshape_to_numpy
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..drop import drop
from ..chunks import chunks
# bcolz constructor kwargs forwarded to ctable/carray; all others filtered out.
keywords = ['cparams', 'dflt', 'expectedlen', 'chunklen', 'rootdir']
@discover.register((ctable, carray))
def discover_bcolz(c, **kwargs):
return datashape.from_numpy(c.shape, c.dtype)
@append.register((ctable, carray), np.ndarray)
def numpy_append_to_bcolz(a, b, **kwargs):
a.append(b)
a.flush()
return a
@append.register((ctable, carray), object)
def numpy_append_to_bcolz(a, b, **kwargs):
return append(a, convert(chunks(np.ndarray), b, **kwargs), **kwargs)
@convert.register(ctable, np.ndarray, cost=2.0)
def convert_numpy_to_bcolz_ctable(x, **kwargs):
return ctable(x, **keyfilter(keywords.__contains__, kwargs))
@convert.register(carray, np.ndarray, cost=2.0)
def convert_numpy_to_bcolz_carray(x, **kwargs):
return carray(x, **keyfilter(keywords.__contains__, kwargs))
@convert.register(np.ndarray, (carray, ctable), cost=1.0)
def convert_bcolz_to_numpy(x, **kwargs):
return x[:]
@append.register((carray, ctable), chunks(np.ndarray))
def append_carray_with_chunks(a, c, **kwargs):
for chunk in c:
append(a, chunk)
a.flush()
return a
@convert.register(chunks(np.ndarray), (ctable, carray), cost=1.2)
def bcolz_to_numpy_chunks(x, chunksize=2**20, **kwargs):
def load():
first_n = min(1000, chunksize)
first = x[:first_n]
yield first
for i in range(first_n, x.shape[0], chunksize):
yield x[i: i + chunksize]
return chunks(np.ndarray)(load)
# Raw string: '\.' is an invalid escape in a plain literal (SyntaxWarning,
# and an error on newer Pythons); the matched pattern is unchanged.
@resource.register(r'.*\.bcolz/?')
def resource_bcolz(uri, dshape=None, expected_dshape=None, **kwargs):
    """Open an existing ``.bcolz`` directory, or create one from *dshape*.

    Parameters
    ----------
    uri : str
        Path ending in ``.bcolz``.
    dshape : str or DataShape, optional
        Required when creating a new container.
    expected_dshape : DataShape, optional
        Used to pre-size the container via ``expectedlen``.
    """
    if os.path.exists(uri):
        try:
            return ctable(rootdir=uri)
        except IOError:  # __rootdirs__ doesn't exist because we aren't a ctable
            return carray(rootdir=uri)
    else:
        if not dshape:
            raise ValueError("Must specify either existing bcolz directory or"
                             " valid datashape")
        dshape = datashape.dshape(dshape)
        dt = dshape_to_numpy(dshape)
        shape_tail = tuple(map(int, dshape.shape[1:]))  # tail of shape
        # A var leading dimension means "grow from empty".
        if dshape.shape[0] == datashape.var:
            shape = (0,) + shape_tail
        else:
            shape = (int(dshape.shape[0]),) + shape_tail
        x = np.empty(shape=shape, dtype=dt)
        kwargs = keyfilter(keywords.__contains__, kwargs)
        # Explicit expectedlen kwarg wins; otherwise derive from the fixed
        # leading dimension of expected_dshape when available.
        expectedlen = kwargs.pop('expectedlen',
                                 int(expected_dshape[0])
                                 if expected_dshape is not None and
                                 isinstance(expected_dshape[0], datashape.Fixed)
                                 else None)
        if datashape.predicates.isrecord(dshape.measure):
            return ctable(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
        else:
            return carray(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
@drop.register((carray, ctable))
def drop_bcolz(b, **kwargs):
b.flush()
shutil.rmtree(b.rootdir)
ooc_types |= set((carray, ctable))
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/backends/bcolz.py
| 0.476823 | 0.345768 |
bcolz.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from collections import Iterator
import numpy as np
import pandas as pd
from datashape.dispatch import dispatch
from datashape import from_numpy, var
from dask.array.core import Array, from_array
from dask.bag.core import Bag
import dask.bag as db
from dask.compatibility import long
import dask.dataframe as dd
from odo import append, chunks, convert, discover, TextFile
from ..utils import filter_kwargs
@discover.register(Array)
def discover_dask_array(a, **kwargs):
return from_numpy(a.shape, a.dtype)
@discover.register(dd.Series)
@discover.register(dd.DataFrame)
def discover_dask_dataframe(df):
return var * discover(df.head()).measure
arrays = [np.ndarray]
try:
import h5py
except ImportError:
pass
else:
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
try:
import bcolz
except ImportError:
pass
else:
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
@convert.register(Array, tuple(arrays), cost=1.)
def array_to_dask(x, name=None, chunks=None, **kwargs):
if chunks is None:
raise ValueError("chunks cannot be None")
return from_array(x, chunks=chunks, name=name,
**filter_kwargs(from_array, kwargs))
@convert.register(np.ndarray, Array, cost=10.)
def dask_to_numpy(x, **kwargs):
return np.array(x)
@convert.register(pd.DataFrame, dd.DataFrame, cost=200)
@convert.register(pd.Series, dd.Series, cost=200)
@convert.register(float, Array, cost=200)
def dask_to_other(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
arr.store(out)
return out
@convert.register(Iterator, Bag)
def bag_to_iterator(x, **kwargs):
return iter(x)
@convert.register(Bag, chunks(TextFile))
def bag_to_iterator(x, **kwargs):
return db.read_text([tf.path for tf in x])
@convert.register(Bag, list)
def bag_to_iterator(x, **kwargs):
return db.from_sequence(x, **filter_kwargs(db.from_sequence, kwargs))
@convert.register(dd.DataFrame, pd.DataFrame, cost=1.)
def pandas_dataframe_to_dask_dataframe(x, npartitions=None, **kwargs):
if npartitions is None:
raise ValueError("npartitions cannot be None")
return dd.from_pandas(x, npartitions=npartitions,
**filter_kwargs(dd.from_pandas, kwargs))
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/backends/dask.py
| 0.573917 | 0.286718 |
dask.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import pymongo
from pymongo.collection import Collection
from collections import Iterator
from datashape import discover, DataShape, Record, var
from datashape.predicates import isdimension
from datashape.dispatch import dispatch
from toolz import take, partition_all, concat, pluck
import copy
from bson.objectid import ObjectId
import re
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
@discover.register(Collection)
def discover_pymongo_collection(coll, n=50):
items = list(take(n, coll.find()))
if not items:
return var * Record([])
oid_cols = [k for k, v in items[0].items() if isinstance(v, ObjectId)]
for item in items:
for col in oid_cols:
del item[col]
ds = discover(items)
if isdimension(ds[0]):
return coll.count() * ds.subshape[0]
else:
raise ValueError("Consistent datashape not found")
def _into_iter_mongodb(coll, columns=None, dshape=None):
""" Into helper function
Return both a lazy sequence of tuples and a list of column names
"""
seq = coll.find()
if not columns and dshape:
columns = dshape.measure.names
elif not columns:
item = next(seq)
seq = concat([[item], seq])
columns = sorted(item.keys())
columns.remove('_id')
return columns, pluck(columns, seq)
@convert.register(Iterator, Collection, cost=500.0)
def collection_to_iterator(coll, columns=None, dshape=None, **kwargs):
columns, seq = _into_iter_mongodb(coll, columns=columns, dshape=dshape)
return seq
@append.register(Collection, Iterator)
def append_iterator_to_pymongo(coll, seq, columns=None, dshape=None, chunksize=1024, **kwargs):
seq = iter(seq)
item = next(seq)
seq = concat([[item], seq])
if isinstance(item, (tuple, list)):
if not columns and dshape:
columns = dshape.measure.names
if not columns:
raise ValueError("Inputs must be dictionaries. "
"Or provide columns=[...] or dshape=DataShape(...) keyword")
seq = (dict(zip(columns, item)) for item in seq)
for block in partition_all(1024, seq):
coll.insert(copy.deepcopy(block))
return coll
@append.register(Collection, object)
def append_anything_to_collection(coll, o, **kwargs):
return append(coll, convert(Iterator, o, **kwargs), **kwargs)
@resource.register(r'mongodb://\w*:\w*@\w*.*', priority=11)
def resource_mongo_with_authentication(uri, collection_name=None, **kwargs):
    """Open a MongoDB resource from a URI that embeds user credentials."""
    pattern = r'mongodb://(?P<user>\w*):(?P<pass>\w*)@(?P<hostport>.*:?\d*)/(?P<database>\w+)'
    parts = re.search(pattern, uri).groupdict()
    return _resource_mongo(parts, collection_name)
@resource.register(r'mongodb://.+')
def resource_mongo(uri, collection_name=None, **kwargs):
    """Open a MongoDB resource from a credential-free URI."""
    pattern = r'mongodb://(?P<hostport>.*:?\d*)/(?P<database>\w+)'
    parts = re.search(pattern, uri).groupdict()
    return _resource_mongo(parts, collection_name)
def _resource_mongo(d, collection_name=None):
client = pymongo.MongoClient(d['hostport'])
db = getattr(client, d['database'])
if d.get('user'):
db.authenticate(d['user'], d['pass'])
if collection_name is None:
return db
return getattr(db, collection_name)
@discover.register(pymongo.database.Database)
def discover_mongo_database(db):
names = db.collection_names()
return DataShape(Record(zip(names, (discover(getattr(db, name))
for name in names))))
ooc_types.add(Collection)
@dispatch(Collection)
def drop(m):
m.drop()
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/backends/mongo.py
| 0.608478 | 0.195364 |
mongo.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import h5py
import os
import datashape
from datashape import DataShape, Record, to_numpy, discover
from datashape.predicates import isrecord
from datashape.dispatch import dispatch
import numpy as np
from toolz import keyfilter
from ..numpy_dtype import dshape_to_numpy
from ..append import append
from ..convert import convert, ooc_types
from ..create import create
from ..resource import resource
from ..chunks import chunks
# Dataset-creation kwargs we allow through to h5py's require_dataset.
h5py_attributes = ['chunks', 'compression', 'compression_opts', 'dtype',
                   'fillvalue', 'fletcher32', 'maxshape', 'shape']
# Variable-length string dtype: `unicode` exists only on Py2, so fall back
# to `str` on Py3 where the name is undefined.
try:
    unicode_dtype = h5py.special_dtype(vlen=unicode)
except NameError:
    unicode_dtype = h5py.special_dtype(vlen=str)
@discover.register((h5py.Group, h5py.File))
def discover_h5py_group_file(g):
return DataShape(Record([[k, discover(v)] for k, v in g.items()]))
def record_dshape_replace(dshape, old, new):
"""Recursively replace all instances of `old` with `new` in the record
dshape `dshape`.
Examples
--------
>>> from datashape import Record, string, object_, dshape
>>> ds = DataShape(Record([('a', 'int64'),
... ('b', 10 * Record([('c', 'object')])),
... ('d', 'int64')]))
...
>>> Record(list(record_dshape_replace(ds, object_, string)))
dshape("{a: int64, b: 10 * {c: object}, d: int64}")
"""
assert isrecord(dshape), 'input dshape must be a record'
for name, subshape in dshape.measure.fields:
if subshape == old:
yield name, new
else:
if isrecord(subshape):
yield record_dshape_replace(subshape, old, new)
else:
yield name, subshape
@discover.register(h5py.Dataset)
def discover_h5py_dataset(d):
dshape = datashape.from_numpy(d.shape, d.dtype)
shape, measure = dshape.shape, dshape.measure
if not isrecord(measure):
if dshape == datashape.object_:
args = shape + (datashape.string,)
return DataShape(*args)
return dshape
else:
records = list(record_dshape_replace(measure, datashape.object_,
datashape.string))
args = shape + (datashape.Record(records),)
return DataShape(*args)
def dtype_replace(dtype, old, new):
"""Replace the subdtype `old` in `subdtype` with `new`.
Parameters
----------
dtype, old, new : dtype
Examples
--------
>>> dt = np.dtype([('a', 'int64'), ('b', 'object'),
... ('c', [('d', 'object'), ('e', 'float64')])])
...
>>> r = np.dtype(list(dtype_replace(dt, 'int64', 'float64')))
>>> r
dtype([('a', '<f8'), ('b', 'O'), ('c', [('d', 'O'), ('e', '<f8')])])
"""
names = dtype.names
assert names is not None, 'dtype must be record-like'
for name, subdtype in zip(names, map(dtype.__getitem__, names)):
if subdtype == old:
yield name, new
else:
if subdtype.names is not None:
yield name, list(dtype_replace(subdtype, old, new))
else:
yield name, subdtype
def varlen_dtype(dt):
"""Inject variable length string element for object dtype
Examples
--------
>>> dt = np.dtype('object')
>>> dt
dtype('O')
>>> r = varlen_dtype(dt)
>>> r
dtype('O')
>>> r.metadata['vlen'] # doctest: +SKIP
<type 'unicode'>
>>> dt = np.dtype([('a', 'int64'), ('b', 'object'),
... ('c', [('d', 'object'), ('e', 'float64')])])
...
>>> dt['b'].metadata
>>> r = varlen_dtype(dt)
>>> r
dtype([('a', '<i8'), ('b', 'O'), ('c', [('d', 'O'), ('e', '<f8')])])
>>> r['b'].metadata['vlen'] # doctest: +SKIP
<type 'unicode'>
"""
if dt == np.object_:
return unicode_dtype
elif dt.names is None: # some kind of non record like dtype
return dt
else:
return np.dtype(list(dtype_replace(dt, np.dtype('object'),
unicode_dtype)))
def dataset_from_dshape(file, datapath, ds, **kwargs):
dtype = varlen_dtype(dshape_to_numpy(ds))
if datashape.var not in list(ds):
shape = tuple(map(int, ds.shape))
elif datashape.var not in list(ds)[1:]:
shape = (0,) + tuple(map(int, ds.shape[1:]))
else:
raise ValueError("Don't know how to handle varlen nd shapes")
if shape:
kwargs['chunks'] = kwargs.get('chunks', True)
kwargs['maxshape'] = kwargs.get('maxshape', (None,) + shape[1:])
kwargs2 = keyfilter(h5py_attributes.__contains__, kwargs)
return file.require_dataset(datapath, shape=shape, dtype=dtype, **kwargs2)
def create_from_datashape(group, ds, name=None, **kwargs):
if not isrecord(ds):
raise ValueError(
"Trying to create an HDF5 file with non-record datashape failed\n"
"Perhaps you forgot to specify a datapath?\n"
"\tdshape: %s\n"
"If you're using odo consider the following change\n"
"\tBefore: odo(data, 'myfile.hdf5')\n"
"\tAfter: odo(data, 'myfile.hdf5::/datapath')" % ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
for name, sub_ds in ds.dict.items():
if isrecord(sub_ds):
g = group.require_group(name)
create_from_datashape(g, sub_ds, **kwargs)
else:
dataset_from_dshape(file=group.file,
datapath='/'.join([group.name, name]),
ds=sub_ds, **kwargs)
@create.register(h5py.File)
def create_h5py_file(cls, path=None, dshape=None, **kwargs):
f = h5py.File(path)
create_from_datashape(f, dshape, **kwargs)
return f
@append.register(h5py.Dataset, np.ndarray)
def append_h5py(dset, x, **kwargs):
if not sum(x.shape):
return dset
shape = list(dset.shape)
shape[0] += len(x)
dset.resize(shape)
dset[-len(x):] = x
return dset
@append.register(h5py.Dataset, chunks(np.ndarray))
def append_h5py(dset, c, **kwargs):
for chunk in c:
append(dset, chunk)
return dset
@append.register(h5py.Dataset, object)
def append_h5py(dset, x, **kwargs):
return append(dset, convert(chunks(np.ndarray), x, **kwargs), **kwargs)
@convert.register(np.ndarray, h5py.Dataset, cost=3.0)
def h5py_to_numpy(dset, force=False, **kwargs):
if dset.size > 1e9:
raise MemoryError(("File size is large: %0.2f GB.\n"
"Convert with flag force=True to force loading") %
(dset.size / 1e9))
else:
return dset[:]
@convert.register(chunks(np.ndarray), h5py.Dataset, cost=3.0)
def h5py_to_numpy_chunks(dset, chunksize=2 ** 20, **kwargs):
def load():
for i in range(0, dset.shape[0], chunksize):
yield dset[i: i + chunksize]
return chunks(np.ndarray)(load)
@resource.register('h5py://.+', priority=11)
def resource_h5py(uri, datapath=None, dshape=None, expected_dshape=None,
                  **kwargs):
    """Open (or create) an h5py file/dataset addressed by an ``h5py://`` URI."""
    if uri.startswith('h5py://'):
        uri = uri[len('h5py://'):]
    f = h5py.File(uri)
    olddatapath = datapath
    if datapath is not None and datapath in f:
        # Existing dataset: validate against the expected shape if given.
        old_dset = f[datapath]
        if expected_dshape is not None:
            dshape = expected_dshape
            assert dshape == discover(old_dset)
    if dshape is not None:
        ds = datashape.dshape(dshape)
        if datapath:
            # Wrap the dshape in nested records, one per path component,
            # so creation builds the full group hierarchy.
            while ds and datapath:
                datapath, name = datapath.rsplit('/', 1)
                ds = Record([[name, ds]])
            ds = datashape.dshape(ds)
        # Reopen through create() so groups/datasets get materialized.
        f.close()
        f = create(h5py.File, path=uri, dshape=ds, **kwargs)
    if olddatapath:
        return f[olddatapath]
    else:
        return f
# Plain .h5/.hdf5 paths (not claimed by hdfstore://) default to h5py.
@resource.register(r'^(?!hdfstore).+\.(hdf5|h5)', priority=10)
def resource_hdf5(uri, *args, **kwargs):
    return resource_h5py(uri, *args, **kwargs)
@dispatch((h5py.Group, h5py.Dataset))
def drop(h):
del h.file[h.name]
@dispatch(h5py.File)
def drop(h):
fn = h.filename
h.close()
os.remove(fn)
ooc_types.add(h5py.Dataset)
|
/sangreal-odo-0.0.5.tar.gz/sangreal-odo-0.0.5/odo/backends/h5py.py
| 0.637031 | 0.327265 |
h5py.py
|
pypi
|
import datetime as dt
from functools import lru_cache
import pandas as pd
from sangreal_wind.sangreal_calendar import adjust_trade_dt
from sangreal_wind.utils.engines import WIND_DB
@lru_cache()
def get_index_weight_all(index):
index = '399300.SZ' if index == '000300.SH' else index
table = getattr(WIND_DB, 'AIndexHS300FreeWeight'.upper())
df = WIND_DB.query(
table.S_CON_WINDCODE, table.I_WEIGHT,
table.TRADE_DT).filter(table.S_INFO_WINDCODE == index).order_by(
table.TRADE_DT.desc()).to_df()
df.columns = ['sid', 'weight', 'trade_dt']
df.weight = df.weight / 100.0
return df
def get_index_weight(index, trade_dt=None):
"""[获取指数成份权重]
Arguments:
index {[str]} -- [windcode of index]
Keyword Arguments:
trade_dt {[str or datetime]} -- [trade_dt] (default: {None})
Returns:
[pd.DataFrame] -- [sid, weight]
"""
if trade_dt is None:
trade_dt = dt.date.today()
trade_dt = adjust_trade_dt(trade_dt)
df = get_index_weight_all(index).copy()
df = df[(df['trade_dt'] <= trade_dt)]
if df.empty:
return pd.DataFrame()
# 取出最近一个交易日
t = df.trade_dt.iloc[0]
df = df[df['trade_dt'] == t]
df.drop(['trade_dt'], axis=1, inplace=True)
if t != trade_dt:
table = getattr(WIND_DB, 'AShareEODPrices'.upper())
q = WIND_DB.query(table.S_INFO_WINDCODE.label(
'sid'), table.S_DQ_ADJCLOSE).filter(table.S_INFO_WINDCODE.in_(df.sid))
c1 = q.filter(table.TRADE_DT == t).to_df().set_index('sid').iloc[:, 0]
c2 = q.filter(table.TRADE_DT == trade_dt).to_df(
).set_index('sid').iloc[:, 0]
adjust_factor = c2 / c1
df.set_index('sid', inplace=True)
df['weight'] = df['weight'] * adjust_factor
df['weight'] /= df['weight'].sum()
df.reset_index(inplace=True)
return df.reset_index(drop=True)
if __name__ == '__main__':
    # Ad-hoc smoke test: print the head of the CSI 300 weights.
    df = get_index_weight('000300.SH')
    print(df.head())
|
/sangreal-wind-0.0.76.tar.gz/sangreal-wind-0.0.76/sangreal_wind/api/get_index_weight.py
| 0.458834 | 0.309402 |
get_index_weight.py
|
pypi
|
import attr
import pandas as pd
from functools import lru_cache
from sangreal_wind.api.get_index_weight import get_index_weight
from sangreal_wind.utils.commons import INDEX_DICT
from sangreal_wind.utils.datetime_handle import dt_handle
from sangreal_wind.utils.engines import WIND_DB
indx_error = f"请输入正确的指数简称,如{list(INDEX_DICT.keys())},或指数wind代码!"
def universe_A(cur_sign=True):
"""[返回最新全A成份股]
Keyword Arguments:
cur_sign {bool} -- [是否需要最新的股票池] (default: {True})
Returns:
[set] -- [set of stk code]
"""
table = WIND_DB.AINDEXMEMBERSWIND
query = WIND_DB.query(table.S_CON_WINDCODE).filter(
table.F_INFO_WINDCODE == '881001.WI')
if cur_sign:
df = query.filter(table.CUR_SIGN == '1').to_df()
else:
df = query.to_df()
df.columns = ['sid']
return set(df.sid)
def universe_normal(indx, cur_sign=True):
"""[返回指数的最新份股]
Arguments:
indx {[str]} -- [wind code of index]
cur_sign {bool} -- [是否需要最新的股票池] (default: {True})
Raises:
ValueError -- [description]
ValueError -- [description]
Returns:
[set] -- [set of stk code]
"""
try:
indx = INDEX_DICT[indx]
except KeyError:
if '.' not in indx:
raise ValueError(indx_error)
table = getattr(WIND_DB, 'AIndexMembers'.upper())
query = WIND_DB.query(table.S_CON_WINDCODE).filter(
table.S_INFO_WINDCODE == indx)
if cur_sign:
df = query.filter(
table.CUR_SIGN == '1', ).to_df()
else:
df = query.to_df()
df.columns = ['sid']
if df.empty:
raise ValueError(indx_error)
return set(df.sid)
def universe_msci(cur_sign=True):
"""[返回MSCI最新成分股]
Arguments:
cur_sign {bool} -- [是否需要最新的股票池] (default: {True})
Returns:
[set] -- [set of stk code]
"""
table = getattr(WIND_DB, 'AshareMSCIMembers'.upper())
query = WIND_DB.query(
table.S_INFO_WINDCODE)
if cur_sign:
df = query.filter(table.CUR_SIGN == '1').to_df()
else:
df = query.to_df()
df.columns = ['sid']
return set(df.sid)
def Universe(indx, cur_sign=True):
"""[返回指数的最新成分股]
Arguments:
indx {[str]} -- [wind code of index or abbrev]
cur_sign {bool} -- [是否需要最新的股票池] (default: {True})
Returns:
[set] -- [set of stk code]
"""
if indx == 'MSCI':
return universe_msci(cur_sign=cur_sign)
elif indx == 'A':
return universe_A(cur_sign=cur_sign)
else:
return universe_normal(indx, cur_sign=cur_sign)
@lru_cache()
def get_all_normal_index(index):
table = getattr(WIND_DB, 'AIndexMembers'.upper())
df = WIND_DB.query(
table.S_CON_WINDCODE.label(
'sid'), table.S_CON_INDATE.label('entry_dt'),
table.S_CON_OUTDATE.label('out_dt')).filter(table.S_INFO_WINDCODE == index).to_df()
return df
@lru_cache()
def get_all_msci():
table = getattr(WIND_DB, 'AshareMSCIMembers'.upper())
df = WIND_DB.query(table.S_INFO_WINDCODE.label('sid'),
table.ENTRY_DT.label('entry_dt'), table.REMOVE_DT.label('out_dt')).to_df()
return df
@lru_cache()
def get_all_stk():
table = getattr(WIND_DB, 'AIndexMembersWind'.upper())
df = WIND_DB.query(table.S_CON_WINDCODE.label('sid'), table.S_CON_INDATE.label('entry_dt'),
table.S_CON_OUTDATE.label('out_dt')).filter(
table.F_INFO_WINDCODE == '881001.WI').to_df()
return df
@lru_cache()
def get_all_hk(index):
table = getattr(WIND_DB, 'HKSTOCKINDEXMEMBERS'.upper())
df = WIND_DB.query(table.S_CON_WINDCODE.label('sid'), table.S_CON_INDATE.label('entry_dt'),
table.S_CON_OUTDATE.label('out_dt')).filter(
table.S_INFO_WINDCODE == index).to_df()
return df
@lru_cache()
def get_all_bond():
table = getattr(WIND_DB, 'CBINDEXMEMBERS'.upper())
df = WIND_DB.query(table.S_CON_WINDCODE.label('sid'), table.S_CON_INDATE.label('entry_dt'),
table.S_CON_OUTDATE.label('out_dt')).filter(
table.S_INFO_WINDCODE == "931078.CSI").to_df()
return df
@attr.s
class DynamicUniverse:
"""[get stock_list of universe on trade_dt]
Raises:
ValueError -- [description]
Returns:
[set] -- [description]
"""
indx = attr.ib()
index = attr.ib(init=False)
members = attr.ib(default=None)
@indx.validator
def check(self, attribute, value):
if value not in INDEX_DICT.keys():
if '.' not in value:
raise ValueError(indx_error)
def __attrs_post_init__(self):
try:
self.index = INDEX_DICT[self.indx]
except KeyError:
self.index = self.indx
def preview(self, trade_dt):
if isinstance(self.members, pd.DataFrame):
df = self.members.copy()
elif self.indx == 'MSCI':
df = get_all_msci()
elif self.indx == 'A':
df = get_all_stk()
elif self.indx == 'CBOND':
df = get_all_bond()
elif self.index.endswith('HI'):
df = get_all_hk(self.index)
elif self.index != '':
df = get_all_normal_index(self.index)
trade_dt = dt_handle(trade_dt)
df = df.loc[(df['entry_dt'] <= trade_dt) & (
(df['out_dt'] >= trade_dt) | (df['out_dt'].isnull()))]
return set(df.sid)
if __name__ == '__main__':
    # Ad-hoc smoke test: size of the HS300 universe on a fixed date.
    f_list = DynamicUniverse('HS300').preview('20180105')
    print(len(f_list))
|
/sangreal-wind-0.0.76.tar.gz/sangreal-wind-0.0.76/sangreal_wind/api/get_universe.py
| 0.489015 | 0.401512 |
get_universe.py
|
pypi
|
from sangreal_wind.sangreal_calendar import Monthly, step_trade_dt
from sangreal_wind.utils.datetime_handle import dt_handle
from sangreal_wind.utils.engines import WIND_DB
# 月度类
MONTH = Monthly(-1)
def get_daily_ret(
sid=None,
trade_dt=None,
begin_dt='20030101',
end_dt='20990101',
universe='A',
):
"""[get daily_ret of stocks,]
Keyword Arguments:
sid {[sid or iterable]} -- [stock windcode] (default: {None})
begin_dt {str or datetime} -- [begin_dt] (default: {'20030101'})
end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
trade_dt {[str or datetime]} -- [trade_dt] (default: {None})
Returns:
ret {pd.DataFrame} -- [sid: trade_dt]
"""
if universe not in ('A', 'HK'):
raise ValueError('universe only support A or HK!')
begin_dt, end_dt = dt_handle(begin_dt), dt_handle(end_dt)
if universe == 'A':
table = getattr(WIND_DB, 'AShareEODPrices'.upper())
else:
table = getattr(WIND_DB, 'HKSHAREEODPRICES'.upper())
query = WIND_DB.query(table.S_INFO_WINDCODE, table.TRADE_DT,
table.S_DQ_ADJCLOSE)
if sid is not None:
if isinstance(sid, str):
query = query.filter(table.S_INFO_WINDCODE == sid)
else:
query = query.filter(table.S_INFO_WINDCODE.in_(sid))
if trade_dt is not None:
begin_dt = end_dt = dt_handle(trade_dt)
df = query.filter(
table.TRADE_DT >= step_trade_dt(begin_dt, -1), table.TRADE_DT <= end_dt).order_by(
table.TRADE_DT).to_df()
df.columns = ['sid', 'trade_dt', 'pct_change']
df = df.pivot(values='pct_change', index='trade_dt', columns='sid')
# # 防止出现0的情况,强制缺失na
df = df.pct_change(fill_method=None)
df.dropna(how='all', inplace=True)
return df.T
def get_monthly_ret(
sid=None,
trade_dt=None,
begin_dt='20030101',
end_dt='20990101',
):
"""[get monthly_ret of stocks,]
Keyword Arguments:
sid {[sid or iterable]} -- [stock windcode] (default: {None})
begin_dt {str or datetime} -- [begin_dt] (default: {'20030101'})
end_dt {str or datetime} -- [end_dt] (default: {'20990101'})
trade_dt {[str or datetime]} -- [trade_dt] (default: {None})
Returns:
ret {pd.DataFrame} -- [sid: trade_dt]
"""
begin_dt, end_dt = dt_handle(begin_dt), dt_handle(end_dt)
table = getattr(WIND_DB, 'ASHAREMONTHLYYIELD'.upper())
query = WIND_DB.query(table.S_INFO_WINDCODE, table.TRADE_DT,
table.S_MQ_PCTCHANGE)
if sid is not None:
if isinstance(sid, str):
query = query.filter(table.S_INFO_WINDCODE == sid)
else:
query = query.filter(table.S_INFO_WINDCODE.in_(sid))
if trade_dt is not None:
trade_dt = MONTH.prev(trade_dt)
df = query.filter(table.TRADE_DT == trade_dt).order_by(
table.TRADE_DT).to_df()
else:
df = query.filter(
table.TRADE_DT >= begin_dt, table.TRADE_DT <= end_dt).order_by(
table.TRADE_DT).to_df()
df.columns = ['sid', 'trade_dt', 'close']
df.close = df.close / 100.0
df = df.pivot(values='close', index='trade_dt', columns='sid')
df.dropna(how='all', inplace=True)
return df.T
if __name__ == '__main__':
    # Ad-hoc smoke test: print daily returns over a fixed window.
    # df = get_daily_ret(begin_dt='20181101')
    # print(df.head())
    df = get_daily_ret(begin_dt='20180101', end_dt='20181223')
    print(df)
|
/sangreal-wind-0.0.76.tar.gz/sangreal-wind-0.0.76/sangreal_wind/api/get_ret.py
| 0.431464 | 0.35095 |
get_ret.py
|
pypi
|
import re
from functools import lru_cache
from sqlalchemy import func
from sqlalchemy.exc import OperationalError
from sangreal_wind.utils import dt_handle
from sangreal_wind.utils.engines import WIND_DB
class DynamicIndustry:
    """Dynamic view of a level-1 CITICS industry at a given trade date."""

    def __init__(self, ind=None):
        # ``None`` means "all industries" rather than a single one.
        self.ind = ind

    def preview(self, trade_dt, adjust=True):
        # adjust {bool} -- remap renamed CITICS industries for backward
        # compatibility (default: {True})
        members = get_industry(trade_dt=trade_dt, level=1, sid=None, adjust=adjust)
        if self.ind is None:
            return set(members.index)
        return set(members[members['ind'] == self.ind].index)
def get_industry(trade_dt, sid=None, level=1, adjust=True):
    """Return the CITICS industry of stocks as of ``trade_dt``.

    Arguments:
        trade_dt {str or datetime} -- trade date

    Keyword Arguments:
        sid {str or iterable} -- stock windcode(s); ``None`` means all
            (default: {None})
        level {int} -- CITICS industry level (default: {1})
        adjust {bool} -- remap renamed CITICS industries for backward
            compatibility (default: {True})

    Returns:
        pd.DataFrame -- indexed by sid, single ``ind`` column
    """
    trade_dt = dt_handle(trade_dt)
    df = get_industry_all(level=level, adjust=adjust)
    if sid is not None:
        wanted = {sid} if isinstance(sid, str) else set(sid)
        df = df[df['sid'].isin(wanted)]
    # Keep only memberships in effect at trade_dt (open-ended out_dt = NULL).
    in_effect = (df['entry_dt'] <= trade_dt) & (
        (df['out_dt'] >= trade_dt) | df['out_dt'].isnull())
    df = df.loc[in_effect].copy()
    return df.set_index('sid')[['ind']]
@lru_cache()
def get_industry_all(level=1, adjust=True):
    """Return the full CITICS industry membership history table (cached).

    Keyword Arguments:
        level {int} -- CITICS industry level (default: {1})
        adjust {bool} -- remap renamed CITICS industries for backward
            compatibility (default: {True})

    Returns:
        pd.DataFrame -- columns: sid, entry_dt, out_dt, ind
    """
    clss = WIND_DB.ASHAREINDUSTRIESCLASSCITICS
    ind_code = WIND_DB.ASHAREINDUSTRIESCODE
    query = WIND_DB.query(
        clss.S_INFO_WINDCODE, clss.ENTRY_DT, clss.REMOVE_DT,
        ind_code.INDUSTRIESNAME).filter(ind_code.LEVELNUM == (level + 1))
    # Industry-code prefix width: 2 chars for the root plus 2 per level.
    width = 2 + 2 * level
    try:
        # ``substring`` is supported by most backends ...
        df = query.filter(
            func.substring(clss.CITICS_IND_CODE, 1, width) ==
            func.substring(ind_code.INDUSTRIESCODE, 1, width)).to_df()
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. Some backends (e.g. SQLite/Oracle,
        # typically via OperationalError) only provide ``substr``.
        df = query.filter(
            func.substr(clss.CITICS_IND_CODE, 1, width) == func.substr(
                ind_code.INDUSTRIESCODE, 1, width)).to_df()
    df.columns = ['sid', 'entry_dt', 'out_dt', 'ind']
    # Strip non-Chinese characters (drops Roman numerals in industry names).
    p = re.compile(r"[^\u4e00-\u9fa5]")
    df.ind = df.ind.str.replace(p, '', regex=True)
    if adjust:
        # Remap renamed CITICS industries so code written against the old
        # classification keeps working.
        def ind_map(x):
            if x == '综合金融':
                return '非银行金融'
            elif x in ('多领域控股', '资产管理', '新兴金融服务'):
                return '多元金融'
            elif x in ('全国性股份制银行', '区域性银行'):
                return '股份制与城商行'
            else:
                return x

        df.ind = df.ind.map(ind_map)
    return df
def get_industry_sp(trade_dt, sid=None, split=['银行', '非银行金融', '综合金融'], adjust=True):
    """Industry classification where the level-1 CITICS industries listed in
    ``split`` are replaced by their level-2 sub-industries.

    Arguments:
        trade_dt {str or datetime} -- trade date

    Keyword Arguments:
        sid {str or iterable} -- stock windcode(s); ``None`` means all
            (default: {None})
        split {list} -- level-1 industries to expand into level-2
            (default: {['银行', '非银行金融', '综合金融']})
        adjust {bool} -- remap renamed CITICS industries for backward
            compatibility (default: {True})

    Returns:
        pd.DataFrame -- indexed by sid, single ``ind`` column
    """
    # Local import: this module does not import pandas at top level.
    import pandas as pd

    trade_dt = dt_handle(trade_dt)
    df = get_industry_all(level=1, adjust=adjust)
    if sid is not None:
        sid = {sid} if isinstance(sid, str) else set(sid)
        df = df[df['sid'].isin(sid)]
    df = df.loc[(df['entry_dt'] <= trade_dt) & (
        (df['out_dt'] >= trade_dt) | (df['out_dt'].isnull()))].copy()
    split_sid = df[df['ind'].isin(split)]
    normal_sid = df[~(df['ind'].isin(split))]
    df1 = get_industry_all(level=2, adjust=adjust)
    df1 = df1[df1['sid'].isin(split_sid.sid)]
    df1 = df1.loc[(df1['entry_dt'] <= trade_dt) & (
        (df1['out_dt'] >= trade_dt) | (df1['out_dt'].isnull()))].copy()
    # Merge untouched level-1 rows with the expanded level-2 rows.
    # BUG FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
    df = pd.concat([normal_sid, df1], ignore_index=True)
    return df.set_index('sid')[['ind']]
if __name__ == '__main__':
    # Manual smoke test: count the level-1 universe members on 2018-01-01.
    print(len(DynamicIndustry().preview('20180101')))
|
/sangreal-wind-0.0.76.tar.gz/sangreal-wind-0.0.76/sangreal_wind/api/get_industry.py
| 0.452778 | 0.335569 |
get_industry.py
|
pypi
|
from functools import lru_cache

try:
    # ``Iterable`` lives in ``collections.abc``; the deprecated alias was
    # removed from ``collections`` in Python 3.10.
    from collections.abc import Iterable
except ImportError:  # pragma: no cover - very old Pythons only
    from collections import Iterable

from sangreal_wind.utils.engines import WIND_DB
from sangreal_wind.utils.fund_type import FUND_TYPE
# Level-0 Wind fund categories; shown in get_fund_filter's error message
# alongside the finer-grained FUND_TYPE entries.
FUND_TYPE_LEVEL0 = ['股票型基金', '混合型基金', '债券型基金']
@lru_cache()
def get_fund_list():
    """Return the currently-active mutual funds with their Wind sector
    (fund-type) classification (cached).

    Returns:
        pd.DataFrame -- columns: sid, entry_dt, exit_dt, fund_type
    """
    sector_tbl = getattr(WIND_DB, 'CHINAMUTUALFUNDSECTOR')
    code_tbl = getattr(WIND_DB, 'ASHAREINDUSTRIESCODE')
    df = WIND_DB.query(
        sector_tbl.F_INFO_WINDCODE,
        sector_tbl.S_INFO_SECTORENTRYDT,
        sector_tbl.S_INFO_SECTOREXITDT,
        code_tbl.INDUSTRIESNAME,
    ).filter(
        sector_tbl.S_INFO_SECTOR == code_tbl.INDUSTRIESCODE,
        # SQLAlchemy requires ==/!= None for IS (NOT) NULL; keep only funds
        # that entered a sector and have not exited it.
        sector_tbl.S_INFO_SECTORENTRYDT != None,  # noqa: E711
        sector_tbl.S_INFO_SECTOREXITDT == None,  # noqa: E711
    ).to_df()
    df.columns = [c.lower() for c in df.columns]
    df = df[df['industriesname'].isin(FUND_TYPE)]
    df.columns = ['sid', 'entry_dt', 'exit_dt', 'fund_type']
    return df
def get_fund_filter(fundtype='all'):
    """Select funds of a given type.

    Keyword Arguments:
        fundtype {str or iterable} -- fund type; 'all', one of the broad
            groups ('股票型', '混合型', '债券型'), a concrete type name,
            or an iterable of type names (default: {'all'})

    Raises:
        ValueError -- when ``fundtype`` is neither a string nor an iterable

    Returns:
        pd.Series -- Series of fund windcodes
    """
    df = get_fund_list()
    if fundtype == 'all':
        return df.sid
    if isinstance(fundtype, str):
        # Broad groups map to their concrete Wind fund types.
        grouped = {
            '股票型': (
                '普通股票型基金',
                '被动指数型基金',
                '增强指数型基金',
            ),
            '混合型': (
                '偏股混合型基金',
                '平衡混合型基金',
                '偏债混合型基金',
                '灵活配置型基金',
            ),
            '债券型': (
                '中长期纯债型基金',
                '短期纯债型基金',
                '混合债券型一级基金',
                '混合债券型二级基金',
                '被动指数型债券基金',
                '增强指数型债券基金',
            ),
        }
        members = grouped.get(fundtype)
        if members is not None:
            return df[df['fund_type'].isin(members)].sid
        # NOTE: rstrip strips trailing '基'/'金' characters (not the exact
        # suffix) before re-appending '基金' — this deliberately accepts
        # truncated spellings like '中长期纯债型基'.
        normalized = fundtype.rstrip('基金') + '基金'
        return df[df['fund_type'] == normalized].sid
    if isinstance(fundtype, Iterable):
        normalized = [f.rstrip('基金') + '基金' for f in fundtype]
        return df[df['fund_type'].isin(normalized)].sid
    raise ValueError(f'请输入正确的基金类型! 如{FUND_TYPE_LEVEL0 + FUND_TYPE}')
if __name__ == '__main__':
    # Manual smoke tests covering the accepted fundtype spellings.
    print(get_fund_filter('all').head())
    print(get_fund_filter('债券型').head())
    print(get_fund_filter('中长期纯债型基').head())
    print(get_fund_filter(['中长期纯债型基', '中长期纯债型基']).head())
|
/sangreal-wind-0.0.76.tar.gz/sangreal-wind-0.0.76/sangreal_wind/api/get_fund_list.py
| 0.44071 | 0.27541 |
get_fund_list.py
|
pypi
|
import os
import logging
import copy
import torch
from typing import Any, Dict, List, Optional, Text, Tuple, Type, Callable
from sani_nlu.utils import initializeFolder, download_model, is_duplicated, is_overlap
from rasa.nlu.components import Component
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from sani_nlu.constants import THRESHOLD
logger = logging.getLogger(__name__)
class OverlapExtractor(EntityExtractor):
    """Post-processing extractor that merges entities from other extractors,
    resolving duplicates/overlaps by a fixed priority: RegexEntityExtractor
    is always kept; FlairExtractor and DIETClassifier entities are added only
    when non-conflicting and at or above the confidence THRESHOLD.
    """

    name = "OverlapExtractor"

    def __init__(
        self,
        component_config: Optional[Dict[Text, Any]] = None,
        learner = None,
    ) -> None:
        super(OverlapExtractor, self).__init__(component_config)

    def train(
        self,
        training_data: TrainingData,
        config: Optional[RasaNLUModelConfig] = None,
        **kwargs: Any,
    ) -> None:
        """Train this component.

        The merge logic is rule-based, so no training is required."""
        pass

    @staticmethod
    def _conflicts(candidate: Dict[Text, Any], accepted: List[Dict[Text, Any]]) -> bool:
        """Return True if *candidate* duplicates or overlaps any already
        accepted entity."""
        return any(
            is_duplicated(candidate, other) or is_overlap(candidate, other)
            for other in accepted
        )

    def process(self, message: Message, **kwargs: Any) -> None:
        """Merge the upstream extractors' entity lists on *message*."""
        text = message.data.get('text')
        if not text:
            return
        old_entities = message.get("entities", [])
        regex_entities = [x for x in old_entities if x.get("extractor") == "RegexEntityExtractor"]
        flair_entities = [x for x in old_entities if x.get("extractor") == "FlairExtractor"]
        diet_entities = [x for x in old_entities if x.get("extractor") == "DIETClassifier"]
        # Priority 1: regex matches are always kept.
        new_entities = list(regex_entities)
        # Priority 2: flair entities (score under the "confidence" key).
        for candidate in flair_entities:
            if (not self._conflicts(candidate, new_entities)
                    and candidate.get("confidence") >= THRESHOLD):
                new_entities.append(candidate)
        # Priority 2: DIET entities (score under "confidence_entity").
        for candidate in diet_entities:
            if (not self._conflicts(candidate, new_entities)
                    and candidate.get("confidence_entity") >= THRESHOLD):
                new_entities.append(candidate)
        message.set("entities", new_entities, add_to_output=True)

    def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
        """Persist this component to disk (stateless, so a no-op)."""
        pass
|
/sani_nlu-1.0.3-py3-none-any.whl/sani_nlu/extractors/overlap_extractor.py
| 0.779909 | 0.171859 |
overlap_extractor.py
|
pypi
|
import os
import logging
import copy
import torch
from typing import Any, Dict, List, Optional, Text, Tuple, Type, Callable
from sani_nlu.utils import initializeFolder, download_model, is_duplicated, is_overlap
from rasa.nlu.components import Component
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from flair.models import SequenceTagger
from flair.data import Sentence
# Free any cached GPU memory before the flair tagger is loaded.
if torch.cuda.is_available():
    torch.cuda.empty_cache()
logger = logging.getLogger(__name__)
class FlairExtractor(EntityExtractor):
    """Rasa entity extractor backed by a flair ``SequenceTagger`` NER model,
    emitting LOCATION entities as ``location``."""

    name = "FlairExtractor"

    def __init__(
        self,
        component_config: Optional[Dict[Text, Any]] = None,
        learner = None,
    ) -> None:
        super(FlairExtractor, self).__init__(component_config)
        # flair SequenceTagger; None when the model could not be loaded.
        self.learner = learner
        initializeFolder()

    def train(
        self,
        training_data: TrainingData,
        config: Optional[RasaNLUModelConfig] = None,
        **kwargs: Any,
    ) -> None:
        """Train this component.

        The flair model is trained offline and only loaded here, so this
        is a no-op."""
        pass

    def process(self, message: Message, **kwargs: Any) -> None:
        """Tag LOCATION entities in the message text with the flair model."""
        text = message.data.get('text')
        # Guard on self.learner: load() may have failed to produce a model.
        if text and self.learner is not None:
            sentence = Sentence(text)
            self.learner.predict(sentence)
            result = sentence.to_dict(tag_type='ner')
            entities = []
            for e in result.get("entities"):
                if e.get("labels")[0].value == "LOCATION":
                    entity = {
                        "value": e.get("text"),
                        "start": e.get("start_pos"),
                        "end": e.get("end_pos"),
                        "confidence": e.get("labels")[0].score,
                        "entity": "location",
                        "extractor": "FlairExtractor",
                    }
                    entities.append(entity)
            message.set("entities", message.get("entities", []) + entities, add_to_output=True)

    def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
        """Persist this component to disk for future loading (no-op)."""
        pass

    @classmethod
    def load(
        cls,
        meta: Dict[Text, Any],
        model_dir: Text,
        model_metadata: Optional["Metadata"] = None,
        cached_component: Optional["Component"] = None,
        **kwargs: Any,
    ) -> "Component":
        """Load this component from file, degrading gracefully (component
        without a model) when the model file is missing or unloadable."""
        MODEL_PATH = download_model()
        if not os.path.isfile(MODEL_PATH):
            logger.error(f"File not found. Cannot load Flair Extractor model: {MODEL_PATH}")
            return cls(component_config=meta)
        try:
            learner = SequenceTagger.load(MODEL_PATH)
            logger.debug(f"Load Flair Extractor model successfully ")
            return cls(meta, learner)
        except Exception as ex:
            logger.error(f"Cannot load Flair Extractor model: {MODEL_PATH}: error: {ex}")
            # BUG FIX: previously fell through and returned None, which
            # crashes the Rasa pipeline; return a model-less component like
            # the file-not-found branch does.
            return cls(component_config=meta)
|
/sani_nlu-1.0.3-py3-none-any.whl/sani_nlu/extractors/flair_extractor.py
| 0.84572 | 0.165425 |
flair_extractor.py
|
pypi
|
from decimal import Decimal
from importlib import import_module
from pathlib import Path
from typing import Dict, List, Optional
import orjson
from sanic import Blueprint, Request, Sanic
from sanic.blueprint_group import BlueprintGroup
from sanic.exceptions import ServerError as SanicServerError
def getpath_by_root(path: str) -> Path:
    """Resolve *path* against the server root (the current working
    directory).

    Args:
        path: path relative to the server root

    Returns:
        The absolute :class:`~pathlib.Path`.
    """
    root = Path.cwd()
    return (root / path).absolute()
def json_dumps(data: dict, default=None) -> str:
    """Serialize *data* to a JSON string via orjson.

    Args:
        data: payload to serialize
        default: optional fallback serializer for unsupported types; when
            omitted, ``Decimal`` values are converted to ``float``

    Returns:
        Pretty-printed (2-space indent) JSON string with a trailing newline.
    """
    def _decimal_fallback(item):
        if isinstance(item, Decimal):
            return float(item.to_eng_string())

    raw = orjson.dumps(
        data,
        default=default if default is not None else _decimal_fallback,
        option=orjson.OPT_APPEND_NEWLINE | orjson.OPT_INDENT_2,
    )
    return raw.decode("utf-8")
def get_current_request() -> Optional[Request]:
    """Return the request currently being handled, or ``None`` when called
    outside of a request context (Sanic raises ServerError in that case)."""
    try:
        current = Request.get_current()
    except SanicServerError:
        return None
    return current
def auto_blueprint(sanic_app: Sanic, base_api_module_name: str) -> None:
    """
    Auto-discover blueprints under *base_api_module_name* and register them
    on *sanic_app* as nested blueprint groups mirroring the package layout.

    Args:
        sanic_app: the Sanic application
        base_api_module_name: name of the API package (located under cwd)

    Returns:
        None
    """
    # Directory backing the API package; assumes it lives directly under cwd.
    base_api_dir: Path = Path.cwd() / base_api_module_name
    # Root blueprint group everything is ultimately attached to.
    root_group: BlueprintGroup = BlueprintGroup(base_api_module_name)
    blueprint_group_map: Dict[str, BlueprintGroup] = {}
    # Walk every package (__init__.py) below the API dir; reversed so deeper
    # packages are processed before their parents.
    init_files: List[Path] = list(base_api_dir.glob("**/__init__.py"))
    for init_file in reversed(init_files):
        # NOTE(review): rebinds ``init_file`` to the package *directory*,
        # so ``init_file.name`` below is the package name.
        init_file: Path = init_file.parent
        # Dotted module path of the package containing the blueprint(s).
        module_path: str = init_file.relative_to(base_api_dir.parent).with_suffix("").as_posix()
        module_name: str = module_path.replace("/", ".")
        # Import the package and collect every Blueprint defined in it.
        module = import_module(module_name, base_api_module_name)
        blueprints = [getattr(module, attr) for attr in dir(module) if isinstance(getattr(module, attr), Blueprint)]
        # Intermediate group names: the path components between the root
        # package and the package itself.
        parts = [path for path in module_path.split("/") if path not in [base_api_module_name, init_file.name]]
        if len(blueprints) == 1:
            blueprint = blueprints[0]
            if not parts:
                # Top-level package: attach an existing child group (built
                # earlier for a deeper package) or the blueprint directly.
                blueprint_group = blueprint_group_map.get(init_file.name)
                if blueprint_group:
                    blueprint.url_prefix = ""
                    blueprint_group.append(blueprint)
                    root_group.append(blueprint_group)
                else:
                    root_group.append(blueprint)
            else:
                # Nested package: file the blueprint under each ancestor
                # group, creating groups on demand.
                for part in parts:
                    group = blueprint_group_map.get(part, BlueprintGroup(part))
                    group.append(blueprint)
                    blueprint_group_map[part] = group
        else:
            # Several blueprints in one package: group them under the
            # package name.
            group = BlueprintGroup(init_file.name)
            group.extend(blueprints)
            root_group.append(group)
    # Register the assembled blueprint tree on the app.
    sanic_app.blueprint(root_group)
|
/sanic_api-0.2.7-py3-none-any.whl/sanic_api/utils.py
| 0.584034 | 0.189915 |
utils.py
|
pypi
|
# Sanic with attrs towards Swagger 2.0 / OpenAPI support
Supercharge your [Sanic](https://github.com/channelcat/sanic) app with:
- [attrs](http://www.attrs.org/)
- [Swagger](https://swagger.io/docs/specification/2-0/basic-structure/)
**Note**: This is a fork of Sanic OpenAPI implementation from [@channelcat](https://github.com/channelcat), which I like a lot but it lacks some of the functionality I wanted (and I also went sideways by using a third-party lib ([`attrs`](http://www.attrs.org/)) as default for modeling input / output model classes).
[](https://pypi.python.org/pypi/sanic-attrs/)
[](https://pypi.python.org/pypi/sanic-attrs/)
## Super quick introduction
Give your Sanic API a UI and OpenAPI documentation, all for the price of free!

## Installation
**Attention**: since this fork came from a necessity of mine, a lot of features I want to implement are still not available, hence the status of `pre-alpha` to this library! Also, _don't try the examples folder_, it was not converted (yet)! Shame on me ...
```shell
pip install sanic-attrs
```
Add OpenAPI and Swagger UI:
```python
from sanic_attrs import swagger_blueprint, openapi_blueprint
app.blueprint(openapi_blueprint)
app.blueprint(swagger_blueprint)
```
You'll now have a Swagger UI at the URL `/swagger`. Your routes will be automatically categorized by their blueprints. This is the default usage, but more advanced usage can be seen. Keep reading!
_Note_: the `swagger_blueprint` is awesome but sometimes you don't want it open-wide for whatever reason you have (security, etc), so you can make it available only if running with `debug=True`, for example. That's how I actually use it :smile:
## [typing](https://docs.python.org/3/library/typing.html)
Since `sanic-attrs` is, of course, based on `attrs` and the Python target version is 3.5+, most of the typing definitions for your model will be made entirely using Python types, either global ones or from the `typing` library. Also, `enums` are supported as well! :sparkles:
Here's the types supported (so far):
- `int`
- `float`
- `str`
- `bool`
- `date`
- `datetime`
- `bytes`
- `typing.Any`
- `typing.Collection`
- `typing.Dict`
- `typing.Iterable`
- `typing.List`
- `typing.Mapping`
- `typing.Optional`
- `typing.Sequence`
- `typing.Set`
- `typing.Union`
**A note on `list` and `dict`**: Please, use `typing.List` and `typing.Dict` for this.
## Usage
### Use simple decorators to document routes
```python
from sanic_attrs import doc
@app.get("/user/<user_id:int>")
@doc.summary("Fetches a user by ID")
@doc.produces(SomeOutputModel)
async def get_user(request, user_id):
...
@app.post("/user")
@doc.summary("Creates a user")
@doc.consumes(SomeInputModel, location="body")
async def create_user(request):
...
```
### Model your input/output
Yes, in this version you **need** to be descriptive :wink:
```python
import typing
from sanic_attrs import doc
class Car(doc.Model):
make: str = doc.field(description="Who made the car")
model: str = doc.field(description="Type of car. This will vary by make")
year: int = doc.field(description="4-digit year of the car", required=False)
class Garage(doc.Model):
spaces: int = doc.field(description="How many cars can fit in the garage")
cars: typing.List[Car] = doc.field(description="All cars in the garage")
@app.get("/garage")
@doc.summary("Gets the whole garage")
@doc.produces(Garage)
async def get_garage(request):
return json({
"spaces": 2,
"cars": [{"make": "Nissan", "model": "370Z"}]
})
```
### Advanced usage
Since `doc.Model` and `doc.field` are nothing more than syntactic sugar for the `@attr.s` decorator and `attr.ib` function, you can express your models using these provided classes and methods or use vanilla `attrs` in your models. Here's a complex example that shows a mixed model:
```python
from enum import Enum, IntEnum
from typing import (Any, Collection, Dict, Iterable, List, Mapping, Optional,
Sequence, Set, Union)
import attr
from sanic_attrs import doc
class PlatformEnum(str, Enum):
XBOX1 = "XBOX1"
PLAYSTATION4 = "PLAYSTATION4"
PC = "PC"
class LanguageEnum(IntEnum):
ENGLISH = 1
JAPANESE = 2
SPANISH = 3
GERMAN = 4
PORTUGUESE = 5
class Something(doc.Model):
some_name: str = doc.field(description="Something name")
@attr.s
class AnotherSomething:
another_name: str = attr.ib(metadata={"description": "Another field"})
class Game(doc.Model):
name: str = doc.field(description="The name of the game")
platform: PlatformEnum = doc.field(description="Which platform it runs on")
score: float = doc.field(description="The average score of the game")
resolution_tested: str = doc.field(description="The resolution which the game was tested")
genre: List[str] = doc.field(description="One or more genres this game is part of")
genre_extra: Sequence[str] = doc.field(description="One or more genres this game is part of")
rating: Dict[str, float] = doc.field(description="Ratings given on each country")
rating_outside: Mapping[str, float] = doc.field(description="Ratings given on each country")
screenshots: Set[bytes] = doc.field(description="Screenshots of the game")
screenshots_extra: Collection[bytes] = doc.field(description="Screenshots of the game")
players: Iterable[str] = doc.field(description="Some of the notorious players of this game")
review_link: Optional[str] = doc.field(description="The link of the game review (if exists)")
junk: Union[str, bytes] = doc.field(description="This should be strange")
more_junk: Any = doc.field(description="The more junk field")
language: LanguageEnum = doc.field(description="The language of the game")
something: List[Something] = doc.field(description="Something to go along the game")
another: AnotherSomething = doc.field(description="Another something to go along the game")
```
### A note on typing hints or `type` argument
You may have noticed that in the example above, all variables have been created using typing hints. While this is somewhat interesting, you may also want to use the `type` argument as provided from the `attr` package, and `sanic-attrs` is absolutely fine with that. So, our `Game` class would rather looks like:
```python
class Game(doc.Model):
name = doc.field(type=str, description="The name of the game")
platform = doc.field(type=PlatformEnum, description="Which platform it runs on")
score = doc.field(type=float, description="The average score of the game")
resolution_tested = doc.field(type=str, description="The resolution which the game was tested")
genre = doc.field(type=List[str], description="One or more genres this game is part of")
genre_extra = doc.field(type=Sequence[str], description="One or more genres this game is part of")
rating = doc.field(type=Dict[str, float], description="Ratings given on each country")
rating_outside = doc.field(type=Mapping[str, float], description="Ratings given on each country")
screenshots = doc.field(type=Set[bytes], description="Screenshots of the game")
screenshots_extra = doc.field(type=Collection[bytes], description="Screenshots of the game")
players = doc.field(type=Iterable[str], description="Some of the notorious players of this game")
review_link = doc.field(type=Optional[str], description="The link of the game review (if exists)")
junk = doc.field(type=Union[str, bytes], description="This should be strange")
more_junk = doc.field(type=Any, description="The more junk field")
language = doc.field(type=LanguageEnum, description="The language of the game")
something = doc.field(type=List[Something], description="Something to go along the game")
another = doc.field(type=AnotherSomething, description="Another something to go along the game")
```
### A note on a lot of features of `attrs`
There are a lot of features in `attrs` that can be handy while declaring a model, such as validators, factories, etc. For this release, some syntactic sugar is planned regarding validators (since most of the rules can be provided to `doc.field`). Other features, like `factories`, are not encouraged at this time (or for the lifetime of this project, undecided) while declaring models since there wasn't enough time to actually test them (so far) :confused:
## On-the-fly input model parsing
There are a few surprises inside `sanic-attrs`. Let's say you have already declared your model, your endpoint and you still have to take the `request.json` and load it as your model? That doesn't seems right ... Fortunatelly, a small middleware was written to handle these cases :wink:
To enable on-the-fly input model parsing, all you need to do is add a `blueprint` to your Sanic app and access the object using the `input_obj` keyword directly from the request:
```python
from sanic_attrs import parser_blueprint
# ...
app.blueprint(parser_blueprint)
# ...
@app.post("/game", strict_slashes=True)
@doc.summary("Inserts the game data into the database")
@doc.response("200", "Game inserted successfuly", model=SuccessOutput)
@doc.response("403", "The user couldn't insert game to application", model=ErrorOutput)
@doc.consumes(Game, location="body", content_type="application/json")
@doc.produces(SuccessOutput)
async def insert_game(request):
my_object = request["input_obj"]
assert isinstance(my_object, Game)
# your logic here
```
**Note**: there are no validations to deal with (really) broken data. If an exception occurs while populating your model, you will find that your `input_obj` keyword will be `None`, along with another key, `input_exc`, that will contain the exception given (if any). If you want to further customize this behavior so you won't need to check for `None` in every request, you can add your own `middleware` **after** adding the `parser_blueprint` to the `app` instance, like the following:
```python
from sanic.response import json
from sanic_attrs import parser_blueprint
# ...
app.blueprint(parser_blueprint)
# ...
@app.middleware("request")
async def check_if_input_is_none(request):
if "input_obj" in request:
if request["input_obj"] is None:
# error handling here
return json({"error": request["input_exc"].args[0]}, 500)
```
## On-the-fly output model serialization
To keep things simple, it is also possible to handle the direct return of `attrs` objects, instead of having to create a dictionary and then serialize or call `sanic.responses.json`, although this is exactly what's running under the hood:
```python
from sanic_attrs import response
# ...
@app.get("/game", strict_slashes=True)
@doc.summary("Gets the most played game in our database")
@doc.response("200", "Game data", model=Game)
@doc.response("403", "The user can't access this endpoint", model=ErrorOutput)
@doc.produces(Game)
async def get_game(request):
game = Game(
name="Cities: Skylines",
platform="PC",
score=9.0,
resolution_tested="1920x1080",
genre=["Simulators", "City Building"],
rating={
"IGN": 8.5,
"Gamespot": 8.0,
"Steam": 4.5
},
players=["Flux", "strictoaster"],
language=1
)
return response.model(game) # <--- the game instance, to be further serialized
```
**Note**: remember to create models that can have all its values serializable to JSON :+1:
### Configure everything else
```python
app.config.API_VERSION = '1.0.0'
app.config.API_TITLE = 'Car API'
app.config.API_DESCRIPTION = 'Car API'
app.config.API_TERMS_OF_SERVICE = 'Use with caution!'
app.config.API_PRODUCES_CONTENT_TYPES = ['application/json']
app.config.API_CONTACT_EMAIL = '[email protected]'
```
### Types not *yet* available
These are the types not available from [`typing`](https://docs.python.org/3/library/typing.html) in the current version (with some notes so I can remember what to do later (if necessary)):
- `AbstractSet` - would be like set?
- `AnyStr` - this is mostly like Optional[str] or just str?
- `AsyncContextManager` - not a variable I think
- `AsyncGenerator` - not a variable I think
- `AsyncIterable` - not a variable I think
- `AsyncIterator` - not a variable I think
- `Awaitable` - not a variable I think
- `BinaryIO` - hmmm, I don't know ... Bytes maybe?
- `ByteString` - could be like bytes, for openapi is `{"type":"string", "format": "byte"}`
- `CT_co` - I don't even know what this is ...
- `Callable` - not a variable
- `CallableMeta` - not a variable
- `ChainMap` - not a variable (?)
- `ClassVar` - generic ...
- `Container` - generic
- `ContextManager` - not a variable
- `Coroutine` - not a variable
- `Counter` - not a variable
- `DefaultDict` - perhaps like dict?
- `Deque` - like List ?
- `FrozenSet` - a "view-only list?
- `Generator` - not a variable
- `Generic` - no way - or Any?
- `Hashable` - a hashmap?
- `IO` - hmmm, from docs: "Generic base class for TextIO and BinaryIO.", so ...
- `ItemsView` - what is an Item? it inherits from AbstractSet ... from docs: "A set is a finite, iterable container."
- `Iterator` - not a variable
- `KT` - generics
- `KeysView` - dict "readonly" ?
- `MappingView` - dict "readonly" ?
- `Match` - generic (I think)
- `MethodDescriptorType` - not a variable
- `MethodWrapperType` - not a variable
- `MutableMapping` - base class of Mapping, docs: "Abstract base class for generic types."
- `MutableSequence` - same as above, but for Sequence
- `MutableSet` - same as above, but for Set
- `NamedTuple` - what to do here? NamedTuple is just an object with variables that can be *anything* I guess ...
- `NamedTupleMeta` - baseclass of NamedTuple
- `NewType` - not a variable / generic ?
- `NoReturn` - not a variable
- `Pattern` - generic
- `Reversible` - generic (Iterable)
- `Sized` - generic
- `SupportsAbs` - not a variable
- `SupportsBytes` - not a variable
- `SupportsComplex` - not a variable
- `SupportsFloat` - not a variable
- `SupportsInt` - not a variable
- `SupportsRound` - not a variable
- `T` - generic
- `TYPE_CHECKING` - ???
- `T_co` - ???
- `T_contra` - ???
- `Text` - returns a str object if created, so I'll stick with str or map it too?
- `TextIO` - buffer, like bytes ... map it?
- `Tuple` - well ... Tuple like lists or Tuple like Tuple[int, str, float] ?
- `TupleMeta` - baseclass of Tuple
- `Type` - generics
- `TypeVar` - generics
- `TypingMeta` - generics
If there's anything missing or required, please fill in a issue or contribute with a PR. PR's are most welcome :smiley:
## TODO
- [ ] Properly deal with `required` fields (in OpenAPI `object` schema)
- [ ] Use type hinting to document the return of a function (as output schema / model)
- [ ] Proper testing
- [ ] Increase use cases
- [ ] Find out if I can get the request model without calling the router
- [ ] Documentation
## License
MIT, the same as [`sanic-openapi`](https://github.com/channelcat/sanic-openapi/blob/ffe8a5c7443810f1dfe65ad7dd1991e776931dc1/LICENSE).
|
/sanic-attrs-0.2.1.tar.gz/sanic-attrs-0.2.1/README.md
| 0.71413 | 0.915545 |
README.md
|
pypi
|
import os
from contextlib import contextmanager
from datetime import datetime
from itertools import repeat
from babel import Locale, dates, numbers, support
try:
from pytz.gae import pytz
except ImportError:
from pytz import UTC, timezone
else:
timezone = pytz.timezone
UTC = pytz.UTC
from sanic_babel.speaklater import LazyString
__version__ = "0.3.0"
def is_immutable(self):
    """Raise ``TypeError`` reporting that *self*'s type is immutable.

    Shared error helper for every mutating method of the immutable
    collections below.
    """
    # BUG FIX: the original used a backslash line-continuation *inside* the
    # string literal, which made the message depend on source indentation;
    # build the message plainly instead.
    raise TypeError(
        "{!r} objects are immutable".format(self.__class__.__name__)
    )
def get_request_container(request):
    """Return the per-request attribute container.

    Newer Sanic requests expose ``request.ctx``; older versions used the
    request object itself as the container.
    """
    if hasattr(request, "ctx"):
        return request.ctx.__dict__
    return request
class ImmutableDictMixin:
    """Makes a :class:`dict` immutable.

    .. versionadded:: 0.5
    :private:
    """

    _hash_cache = None

    @classmethod
    def fromkeys(cls, keys, value=None):
        # Initialise directly, bypassing the (blocked) mutating API.
        obj = super(cls, cls).__new__(cls)
        obj.__init__(zip(keys, repeat(value)))
        return obj

    def __reduce_ex__(self, protocol):
        # Pickle as (type, plain-dict copy of the contents).
        return type(self), (dict(self),)

    def _iter_hashitems(self):
        return iter(self)

    def __hash__(self):
        # Immutability makes the hash safe to compute once and cache.
        cached = self._hash_cache
        if cached is None:
            cached = self._hash_cache = hash(frozenset(self._iter_hashitems()))
        return cached

    def setdefault(self, key, default=None):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def pop(self, key, default=None):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def __delitem__(self, key):
        is_immutable(self)

    def clear(self):
        is_immutable(self)
class ImmutableDict(ImmutableDictMixin, dict):
    """An immutable :class:`dict`.

    .. versionadded:: 0.5
    """

    def __repr__(self):
        return "{}({})".format(type(self).__name__, dict.__repr__(self),)

    def copy(self):
        """Return a shallow mutable copy of this object. Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return dict(self)

    def __copy__(self):
        # Safe: instances are immutable, so a copy can share identity.
        return self
class Babel:
"""Central controller class that can be used to configure how
sanic-babel behaves. Each application that wants to use sanic-babel
has to create, or run :meth:`init_app` on, an instance of this class
after the configuration was initialized.
"""
default_date_formats = ImmutableDict(
{
"time": "medium",
"date": "medium",
"datetime": "medium",
"time.short": None,
"time.medium": None,
"time.full": None,
"time.long": None,
"date.short": None,
"date.medium": None,
"date.full": None,
"date.long": None,
"datetime.short": None,
"datetime.medium": None,
"datetime.full": None,
"datetime.long": None,
}
)
    def __init__(
        self,
        app=None,
        default_locale="en",
        default_timezone="UTC",
        date_formats=None,
        configure_jinja=True,
    ):
        """Store configuration; when *app* is given, :meth:`init_app` is
        invoked immediately.

        :param app: optional Sanic application to initialise right away
        :param default_locale: locale used when no selector applies
        :param default_timezone: timezone used when no selector applies
        :param date_formats: optional mapping overriding
            :attr:`default_date_formats`
        :param configure_jinja: whether to install babel filters into
            ``app.ctx.jinja_env``
        """
        self._default_locale = default_locale
        self._default_timezone = default_timezone
        self._date_formats = date_formats
        self._configure_jinja = configure_jinja
        self.app = app
        # Callbacks registered via localeselector()/timezoneselector().
        self.locale_selector_func = None
        self.timezone_selector_func = None
        if app is not None:
            self.init_app(app)
def init_app(self, app):
"""Set up this instance for use with *app*, if no app was passed to
the constructor.
"""
self.app = app
app.ctx.babel_instance = self
if not hasattr(app.ctx, "extensions"):
app.ctx.extensions = {}
app.ctx.extensions["babel"] = self
app.ctx.babel_translations = {} # cache translations per locale?
app.config.setdefault("BABEL_DEFAULT_LOCALE", self._default_locale)
app.config.setdefault("BABEL_DEFAULT_TIMEZONE", self._default_timezone)
if self._date_formats is None:
self._date_formats = self.default_date_formats.copy()
#: a mapping of Babel datetime format strings that can be modified
#: to change the defaults. If you invoke :func:`format_datetime`
#: and do not provide any format string sanic-babel will do the
#: following things:
#:
#: 1. look up ``date_formats['datetime']``. By default ``'medium'``
#: is returned to enforce medium length datetime formats.
#: 2. ``date_formats['datetime.medium'] (if ``'medium'`` was
#: returned in step one) is looked up. If the return value
#: is anything but `None` this is used as new format string.
#: otherwise the default for that language is used.
self.date_formats = self._date_formats
if self._configure_jinja:
if not hasattr(app.ctx, "jinja_env"):
raise ValueError("app.ctx.jinja_env shoud be setup at first.")
app.ctx.jinja_env.filters.update(
datetimeformat=format_datetime,
dateformat=format_date,
timeformat=format_time,
timedeltaformat=format_timedelta,
numberformat=format_number,
decimalformat=format_decimal,
currencyformat=format_currency,
percentformat=format_percent,
scientificformat=format_scientific,
)
app.ctx.jinja_env.add_extension("jinja2.ext.i18n")
app.ctx.jinja_env.newstyle_gettext = True
# reference for update context in jinja_env
self._get_translations = get_translations
def localeselector(self, f):
"""Registers a callback function for locale selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the locale falls back to the one from
the configuration.
This has to return the locale as string (eg: ``'de_AT'``, ''`en_US`'')
"""
assert (
self.locale_selector_func is None
), "a localeselector function is already registered"
self.locale_selector_func = f
return f
def timezoneselector(self, f):
"""Registers a callback function for timezone selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the timezone falls back to the one from
the configuration.
This has to return the timezone as string (eg: ``'Europe/Vienna'``)
"""
assert (
self.timezone_selector_func is None
), "a timezoneselector function is already registered"
self.timezone_selector_func = f
return f
def list_translations(self):
"""Returns a list of all the locales translations exist for. The
list returned will be filled with actual locale objects and not just
strings.
"""
result = []
for dirname in self.translation_directories:
if not os.path.isdir(dirname):
continue
for folder in os.listdir(dirname):
locale_dir = os.path.join(dirname, folder, "LC_MESSAGES")
if not os.path.isdir(locale_dir):
continue
if filter(lambda x: x.endswith(".mo"), os.listdir(locale_dir)):
result.append(Locale.parse(folder))
# If not other translations are found, add the default locale.
if not result:
result.append(Locale.parse(self._default_locale))
return result
@property
def default_locale(self):
"""The default locale from the configuration as instance of a
`babel.Locale` object.
"""
return Locale.parse(self.app.config["BABEL_DEFAULT_LOCALE"])
@property
def default_timezone(self):
"""The default timezone from the configuration as instance of a
`pytz.timezone` object.
"""
return timezone(self.app.config["BABEL_DEFAULT_TIMEZONE"])
@property
def translation_directories(self):
directories = self.app.config.get(
"BABEL_TRANSLATION_DIRECTORIES", "translations"
).split(";")
root_path = getattr(self.app, "root_path", None)
for path in directories:
if not os.path.isabs(path) and root_path is not None:
path = os.path.join(root_path, path)
yield path
def get_translations(request=None):
    """Returns the correct gettext translations that should be used for
    this request. This will never fail and return a dummy translation
    object if used outside of the request or if a translation cannot be
    found.
    """
    if request is None:
        return support.NullTranslations()
    request_ = get_request_container(request)
    # Fast path: translations already resolved for this request.
    translations = request_.get("babel_translations", None)
    if translations is None:
        app = request.app
        locale = get_locale(request)
        # Second-level cache: translations resolved earlier for this
        # locale on the same app; copy the reference into the request.
        if locale in app.ctx.babel_translations:
            request_["babel_translations"] = app.ctx.babel_translations[locale]
            return app.ctx.babel_translations[locale]
        # Cache miss: load and merge catalogs from every configured
        # translation directory.
        translations = support.Translations()
        babel = app.ctx.babel_instance
        for dirname in babel.translation_directories:
            catalog = support.Translations.load(dirname, [locale])
            translations.merge(catalog)
            # FIXME: Workaround for merge() being really, really stupid. It
            # does not copy _info, plural(), or any other instance variables
            # populated by GNUTranslations. We probably want to stop using
            # `support.Translations.merge` entirely.
            if hasattr(catalog, "plural"):
                translations.plural = catalog.plural
        # Populate both cache levels before returning.
        request_["babel_translations"] = translations
        app.ctx.babel_translations[locale] = translations
    return translations
def get_locale(request=None):
    """Returns the locale that should be used for this request as
    `babel.Locale` object. This returns `Locale.parse('en')` if used outside
    of a request.
    """
    if request is None:
        return Locale.parse("en")
    container = get_request_container(request)
    cached = container.get("babel_locale", None)
    if cached is not None:
        return cached
    babel = request.app.ctx.babel_instance
    selector = babel.locale_selector_func
    if selector is None:
        chosen = babel.default_locale
    else:
        selected = selector(request)
        # A selector returning None means "fall back to configuration".
        chosen = babel.default_locale if selected is None else Locale.parse(selected)
    container["babel_locale"] = chosen
    return chosen
def get_timezone(request=None):
    """Returns the timezone that should be used for this request as
    `pytz.timezone` object. This returns `UTC` if used outside of
    a request.
    """
    if request is None:
        return UTC
    container = get_request_container(request)
    cached = container.get("babel_tzinfo")
    if cached is not None:
        return cached
    babel = request.app.ctx.babel_instance
    selector = babel.timezone_selector_func
    if selector is None:
        chosen = babel.default_timezone
    else:
        selected = selector(request)
        if selected is None:
            # Selector declined; fall back to the configured default.
            chosen = babel.default_timezone
        elif isinstance(selected, str):
            chosen = timezone(selected)
        else:
            # Assume an already-constructed tzinfo object.
            chosen = selected
    container["babel_tzinfo"] = chosen
    return chosen
def refresh(request=None):
    """Refreshes the cached timezones and locale information. This can
    be used to switch a translation between a request and if you want
    the changes to take place immediately, not just with the next request::

        user.timezone = request.form['timezone']
        user.locale = request.form['locale']
        refresh(request)
        jinja.flash(gettext('Language was changed', request))

    NOTICE: :func:`jinja.flash` function is from `sanic-jinja2` package.
    Without that refresh, the :func:`jinja.flash` function would probably
    return English text and a now German page.
    """
    if request is None:
        return
    container = get_request_container(request)
    # Drop every per-request i18n cache entry so the next lookup
    # re-runs the selectors.
    for cached_key in ("babel_locale", "babel_tzinfo", "babel_translations"):
        container.pop(cached_key, None)
@contextmanager
def force_locale(locale, request=None):
    """Temporarily overrides the currently selected locale.
    Sometimes it is useful to switch the current locale to different one, do
    some tasks and then revert back to the original one. For example, if the
    user uses German on the web site, but you want to send them an email in
    English, you can use this function as a context manager::
        with force_locale('en_US', request):
            send_email(gettext('Hello!', request), ...)
    :param locale: The locale to temporary switch to (ex: 'en_US').
    :param request: the current Request object
    """
    if request is None:
        # Outside of a request there is nothing to override.
        yield
        return
    babel = request.app.ctx.babel_instance
    request_ = get_request_container(request)
    # Remember the selector and the per-request caches so they can be
    # restored exactly, whatever happens inside the block.
    orig_locale_selector_func = babel.locale_selector_func
    orig_attrs = {}
    for key in ("babel_translations", "babel_locale"):
        orig_attrs[key] = request_.get(key, None)
    try:
        # Force every subsequent lookup to resolve to the given locale,
        # and clear the caches so the forced selector is consulted.
        babel.locale_selector_func = lambda request: locale
        for key in orig_attrs:
            request_[key] = None
        yield
    finally:
        # Restore the selector and caches even if the body raised.
        babel.locale_selector_func = orig_locale_selector_func
        for key, value in orig_attrs.items():
            request_[key] = value
def _get_format(key, format, request):
    """A small helper for the datetime formatting functions. Looks up
    format defaults for different kinds.
    """
    source = (
        Babel.default_date_formats.copy()
        if request is None
        else request.app.ctx.extensions["babel"].date_formats
    )
    chosen = format if format is not None else source[key]
    if chosen in ("short", "medium", "full", "long"):
        # A named length may carry a per-kind override (e.g.
        # ``date_formats['datetime.medium']``).
        override = source["{}.{}".format(key, chosen)]
        if override is not None:
            chosen = override
    return chosen
def to_user_timezone(datetime, request=None):
    """Convert a datetime object to the user's timezone. This automatically
    happens on all date formatting unless rebasing is disabled. If you need
    to convert a :class:`datetime.datetime` object at any time to the user's
    timezone (as returned by :func:`get_timezone` this function can be used).
    """
    # Naive datetimes are interpreted as UTC before conversion.
    aware = datetime if datetime.tzinfo is not None else datetime.replace(tzinfo=UTC)
    user_tz = get_timezone(request)
    return user_tz.normalize(aware.astimezone(user_tz))
def to_utc(datetime, request=None):
    """Convert a datetime object to UTC and drop tzinfo. This is the
    opposite operation to :func:`to_user_timezone`.
    """
    localized = datetime
    if localized.tzinfo is None:
        # Naive values are assumed to be in the user's timezone.
        localized = get_timezone(request).localize(localized)
    return localized.astimezone(UTC).replace(tzinfo=None)
def format_datetime(datetime=None, format=None, rebase=True, request=None):
    """Return a date formatted according to the given pattern. If no
    :class:`~datetime.datetime` object is passed, the current time is
    assumed. By default rebasing happens which causes the object to
    be converted to the users's timezone (as returned by
    :func:`to_user_timezone`). This function formats both date and
    time.

    The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which cause the language's default for
    that setting is used, or the default from the :attr:`Babel.date_formats`
    mapping is used) or a format string as documented by Babel.

    This function is also available in the template context as filter
    named `datetimeformat`.
    """
    resolved = _get_format("datetime", format, request)
    return _date_format(
        dates.format_datetime, datetime, resolved, rebase, request=request
    )
def format_date(date=None, format=None, rebase=True, request=None):
    """Return a date formatted according to the given pattern. If no
    :class:`~datetime.datetime` or :class:`~datetime.date` object is passed,
    the current time is assumed. By default rebasing happens which causes
    the object to be converted to the users's timezone (as returned by
    :func:`to_user_timezone`). This function only formats the date part
    of a :class:`~datetime.datetime` object.

    The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which cause the language's default for
    that setting is used, or the default from the :attr:`Babel.date_formats`
    mapping is used) or a format string as documented by Babel.

    This function is also available in the template context as filter
    named `dateformat`.
    """
    if rebase and isinstance(date, datetime):
        # Bug fix: ``request`` was previously not forwarded here, so
        # rebasing always used the default timezone instead of the
        # current request's user timezone.
        date = to_user_timezone(date, request)
    format = _get_format("date", format, request)
    return _date_format(
        dates.format_date, date, format, rebase, request=request
    )
def format_time(time=None, format=None, rebase=True, request=None):
    """Return a time formatted according to the given pattern. If no
    :class:`~datetime.datetime` object is passed, the current time is
    assumed. By default rebasing happens which causes the object to
    be converted to the users's timezone (as returned by
    :func:`to_user_timezone`). This function formats both date and
    time.

    The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which cause the language's default for
    that setting is used, or the default from the :attr:`Babel.date_formats`
    mapping is used) or a format string as documented by Babel.

    This function is also available in the template context as filter
    named `timeformat`.
    """
    resolved = _get_format("time", format, request)
    return _date_format(
        dates.format_time, time, resolved, rebase, request=request
    )
def format_timedelta(
    datetime_or_timedelta,
    granularity="second",
    add_direction=False,
    threshold=0.85,
    request=None,
):
    """Format the elapsed time from the given date to now or the given
    timedelta.

    This function is also available in the template context as filter
    named `timedeltaformat`.
    """
    delta = datetime_or_timedelta
    if isinstance(delta, datetime):
        # A datetime is interpreted as "time elapsed since then".
        delta = datetime.utcnow() - delta
    return dates.format_timedelta(
        delta,
        granularity,
        threshold=threshold,
        add_direction=add_direction,
        locale=get_locale(request),
    )
def _date_format(formatter, obj, format, rebase, request=None, **extra):
    """Internal helper that formats the date.

    :param formatter: the ``babel.dates`` formatting callable to invoke.
    :param obj: the date/time value to format.
    :param format: resolved format string or named length.
    :param rebase: whether to rebase into the user's timezone
        (date-only formatting never rebases).
    :param request: the current Request object, if any.
    :param extra: additional keyword arguments forwarded to *formatter*.
    """
    locale = get_locale(request)
    # Bug fix: ``extra`` was previously reset to ``{}`` here, silently
    # discarding any keyword arguments passed by the caller.
    if formatter is not dates.format_date and rebase:
        extra["tzinfo"] = get_timezone(request)
    return formatter(obj, format, locale=locale, **extra)
def format_number(number, request=None):
    """Return the given number formatted for the locale in request
    :param number: the number to format
    :param request: the current Request object
    :return: the formatted number
    :rtype: str
    """
    locale = get_locale(request)
    return numbers.format_number(number, locale=locale)
def format_decimal(number, format=None, request=None):
    """Return the given decimal number formatted for the locale in request
    :param number: the number to format
    :param format: the format to use
    :param request: the current Request object
    :return: the formatted number
    :rtype: str
    """
    locale = get_locale(request)
    return numbers.format_decimal(number, format=format, locale=locale)
def format_currency(
    number,
    currency,
    format=None,
    currency_digits=True,
    format_type="standard",
    request=None,
):
    """Return the given number formatted for the locale in request
    :param number: the number to format
    :param currency: the currency code
    :param format: the format to use
    :param currency_digits: use the currency’s number of decimal digits
    [default: True]
    :param format_type: the currency format type to use
    [default: standard]
    :param request: the current Request object
    :return: the formatted number
    :rtype: str
    """
    locale = get_locale(request)
    return numbers.format_currency(
        number,
        currency,
        format=format,
        locale=locale,
        currency_digits=currency_digits,
        format_type=format_type,
    )
def format_percent(number, format=None, request=None):
    """Return formatted percent value for the locale in request
    :param number: the number to format
    :param format: the format to use
    :param request: the current Request object
    :return: the formatted percent number
    :rtype: str
    """
    locale = get_locale(request)
    return numbers.format_percent(number, format=format, locale=locale)
def format_scientific(number, format=None, request=None):
    """Return value formatted in scientific notation for the locale in request
    :param number: the number to format
    :param format: the format to use
    :param request: the current Request object
    :return: the formatted percent number
    :rtype: str
    """
    locale = get_locale(request)
    return numbers.format_scientific(number, format=format, locale=locale)
def gettext(string, request=None, **variables):
    """Translates a string with the current locale and passes in the
    given keyword arguments as mapping to a string formatting string.
    ::
        gettext('Hello World!', request)
        gettext('Hello %(name)s!', request, name='World')
    """
    translations = get_translations(request)
    if translations is None:
        return (string % variables) if variables else string
    translated = translations.ugettext(string)
    return (translated % variables) if variables else translated
_ = gettext  # conventional gettext alias, recognized by extraction tooling
def ngettext(singular, plural, num, request=None, **variables):
    """Translates a string with the current locale and passes in the
    given keyword arguments as mapping to a string formatting string.

    The `num` parameter is used to dispatch between singular and various
    plural forms of the message. It is available in the format string
    as ``%(num)d`` or ``%(num)s``. The source language should be
    English or a similar language which only has one plural form.
    ::
        ngettext('%(num)d Apple', '%(num)d Apples', request=request,
                 num=len(apples))
    """
    variables.setdefault("num", num)
    translations = get_translations(request)
    if translations is None:
        chosen = singular if num == 1 else plural
    else:
        chosen = translations.ungettext(singular, plural, num)
    return chosen % variables if variables else chosen
def pgettext(context, string, request=None, **variables):
    """Like :func:`gettext` but with a context."""
    translations = get_translations(request)
    if translations is None:
        return string % variables if variables else string
    translated = translations.upgettext(context, string)
    return translated % variables if variables else translated
def npgettext(context, singular, plural, num, request=None, **variables):
    """Like :func:`ngettext` but with a context."""
    variables.setdefault("num", num)
    translations = get_translations(request)
    if translations is None:
        chosen = singular if num == 1 else plural
    else:
        chosen = translations.unpgettext(context, singular, plural, num)
    return chosen % variables if variables else chosen
def lazy_gettext(string, **variables):
    """Like :func:`gettext` but the string returned is lazy which means
    it will be translated when it is used as an actual string.

    NOTE: As `sanic` does not provide something like `ctx_stack`, the
    `lazy object` should call with `request` before using as an actual string.

    Example::
        hello = lazy_gettext('Hello World')
        @app.route('/')
        def index(request):
            return str(hello(request))
    """
    # Translation is deferred until the LazyString is called/str()-ed.
    return LazyString(gettext, string, **variables)
def lazy_pgettext(context, string, **variables):
    """Like :func:`pgettext` but the string returned is lazy which means
    it will be translated when it is used as an actual string.
    """
    # Translation is deferred until the LazyString is called/str()-ed.
    return LazyString(pgettext, context, string, **variables)
|
/sanic-babel-0.3.0.tar.gz/sanic-babel-0.3.0/sanic_babel/__init__.py
| 0.746046 | 0.21566 |
__init__.py
|
pypi
|
from functools import wraps
from typing import Callable, Any, Union
from sanic import Request
from sanic_beskar.exceptions import (
BeskarError,
MissingRoleError,
MissingRightError,
MissingToken,
)
from sanic_beskar.utilities import (
current_guard,
add_token_data_to_app_context,
app_context_has_token_data,
remove_token_data_from_app_context,
current_rolenames,
)
async def _verify_and_add_token(request: Request, optional: bool = False) -> None:
    """
    This helper method just checks and adds token data to the app context.
    If optional is False and the header is missing the token, just returns.
    Will not add token data if it is already present.

    Only used in this module

    Args:
        request (sanic.Request): Current Sanic ``Request``
        optional (bool, optional): Token is not required. Defaults to False.

    Raises:
        MissingToken: Token is required and not present.
    """
    # Token data already attached by an outer decorator: nothing to do.
    if app_context_has_token_data():
        return
    guard = current_guard()
    try:
        token = guard.read_token(request=request)
    except MissingToken:
        if optional:
            return
        raise
    token_data = await guard.extract_token(token)
    add_token_data_to_app_context(token_data)
def auth_required(method: Callable) -> Callable[..., Any]:
    """
    This decorator is used to ensure that a user is authenticated before
    being able to access a sanic route. It also adds the current user to the
    current sanic context.

    Args:
        method (Callable): Function or route to protect.

    Returns:
        None: Decorator

    Raises:
        MissingToken: No authenticated user token is available to authorize.
    """
    @wraps(method)
    async def wrapper(request: Request, *args: tuple, **kwargs: dict) -> Any:
        # TODO: hack to work around class based views
        if not isinstance(request, Request) and isinstance(args[0], Request):
            request = args[0]
        await _verify_and_add_token(request=request)
        try:
            return await method(request, *args, **kwargs)
        finally:
            # Always drop the token from the app context after the view runs.
            remove_token_data_from_app_context()
    return wrapper
def auth_accepted(method: Callable) -> Callable[..., Any]:
    """
    This decorator is used to allow an authenticated user to be identified
    while being able to access a sanic route, and adds the current user to the
    current sanic context.

    Args:
        method (Callable): Function or route to protect.

    Returns:
        None: Decorator
    """
    @wraps(method)
    async def wrapper(request: Request, *args: tuple, **kwargs: dict) -> Any:
        # TODO: hack to work around class based views
        if not isinstance(request, Request) and isinstance(args[0], Request):
            request = args[0]
        try:
            # Token is optional here: anonymous access is allowed.
            await _verify_and_add_token(request, optional=True)
            return await method(request, *args, **kwargs)
        finally:
            remove_token_data_from_app_context()
    return wrapper
def roles_required(*required_rolenames: Union[list, set]) -> Callable[..., Any]:
    """
    This decorator ensures that any uses accessing the decorated route have all
    the needed roles to access it. If an :py:func:`auth_required` decorator is not
    supplied already, this decorator will implicitly check :py:func:`auth_required`
    first

    Args:
        required_rolenames (Union[list, set]): Role names required to be present
            in the authenticated users ``roles`` attribute.

    Returns:
        None: Decorator

    Raises:
        sanic_beskar.BeskarError: `roles_disabled` for this application.
        MissingRoleError: Missing required role names in user ``roles`` attribute.
        MissingTokenError: Token missing in ``Sanic.Request``
    """
    def decorator(method: Callable) -> Callable:
        @wraps(method)
        async def wrapper(request: Request, *args: tuple, **kwargs: dict) -> Any:
            BeskarError.require_condition(
                not current_guard().roles_disabled,
                "This feature is not available because roles are disabled",
            )
            # TODO: hack to work around class based views
            if not isinstance(request, Request) and isinstance(args[0], Request):
                request = args[0]
            await _verify_and_add_token(request)
            try:
                # Every required role must be present in the token's roles.
                missing = {*required_rolenames} - {*(await current_rolenames())}
                MissingRoleError.require_condition(
                    not missing,
                    'This endpoint requires all the following roles: '
                    f'[{required_rolenames}]',
                )
                return await method(request, *args, **kwargs)
            finally:
                remove_token_data_from_app_context()
        return wrapper
    return decorator
def rights_required(*required_rights: Union[list, set]) -> Callable[..., Any]:
    """
    This decorator ensures that any uses accessing the decorated route have all
    the needed rights to access it. If an :py:func:`auth_required` decorator is not
    supplied already, this decorator will implicitly check :py:func:`auth_required`
    first.

    Args:
        required_rights (Union[list, set]): Right names required to be present,
            based upon the implied rights in the authenticated users ``roles`` attribute
            breakdown.

    Returns:
        None: Decorator

    Raises:
        sanic_beskar.BeskarError: `roles_disabled` for this application.
        MissingRightError: Missing required rights in user ``roles`` attribute breakdown.
        MissingTokenError: Token missing in ``Sanic.Request``
    """
    def decorator(method: Callable[..., Any]) -> Callable[..., Any]:
        @wraps(method)
        async def wrapper(request: Request, *args: tuple, **kwargs: dict) -> Any:
            # RBAC must be configured before rights can be enforced.
            BeskarError.require_condition(
                current_guard().rbac_definitions != {},
                "This feature is not available because RBAC is not enabled",
            )
            # TODO: hack to work around class based views
            if not isinstance(request, Request):
                if isinstance(args[0], Request):
                    request = args[0]
            await _verify_and_add_token(request)
            try:
                current_roles = await current_rolenames()
                for right in required_rights:
                    # An undefined right is a configuration error, not a
                    # missing-permission error.
                    BeskarError.require_condition(
                        right in current_guard().rbac_definitions,
                        'This endpoint requires a right which is not otherwise defined: '
                        f'[{right}]',
                    )
                    # The user must hold at least one role granting each right.
                    MissingRightError.require_condition(
                        not {*current_roles}.isdisjoint(
                            {*(current_guard().rbac_definitions[right])}
                        ),
                        'This endpoint requires all the following rights: '
                        f'[{required_rights}]',
                    )
                return await method(request, *args, **kwargs)
            finally:
                # Always drop token data from the app context after the view.
                remove_token_data_from_app_context()
        return wrapper
    return decorator
def roles_accepted(*accepted_rolenames: Union[list, set]) -> Callable[..., Any]:
    """
    This decorator ensures that any uses accessing the decorated route have one
    of the needed roles to access it. If an :py:func:`auth_required` decorator is not
    supplied already, this decorator will implicitly check :py:func:`auth_required`
    first

    Args:
        accepted_rolenames (Union[list, set]): Role names, at least one of which is
            required to be present, in the authenticated users ``roles`` attribute.

    Returns:
        None: Decorator
    """
    def decorator(method: Callable) -> Callable[..., Any]:
        @wraps(method)
        async def wrapper(request: Request, *args: tuple, **kwargs: dict) -> Any:
            BeskarError.require_condition(
                not current_guard().roles_disabled,
                "This feature is not available because roles are disabled",
            )
            # TODO: hack to work around class based views
            if not isinstance(request, Request) and isinstance(args[0], Request):
                request = args[0]
            await _verify_and_add_token(request)
            try:
                # At least one accepted role must overlap the user's roles.
                held = await current_rolenames()
                MissingRoleError.require_condition(
                    not held.isdisjoint(accepted_rolenames),
                    'This endpoint requires one of the following roles: '
                    f'[{accepted_rolenames}]',
                )
                return await method(request, *args, **kwargs)
            finally:
                remove_token_data_from_app_context()
        return wrapper
    return decorator
|
/sanic_beskar-2.2.11-py3-none-any.whl/sanic_beskar/decorators.py
| 0.739893 | 0.23985 |
decorators.py
|
pypi
|
import functools
from collections.abc import Iterable
import re
import datetime as dt
from typing import Optional, Union, Any, TYPE_CHECKING
from types import SimpleNamespace
# If we are using `beanie`, we need to patch JSONEncoder to undersand its objectid
try: # pragma: no cover
from beanie import PydanticObjectId as ObjectId
except (ImportError, ModuleNotFoundError): # pragma: no cover
from bson.objectid import ObjectId # type: ignore
## If we are using `segno`, import for typing
if TYPE_CHECKING:
from segno import QRCode
from sanic_beskar import Beskar as BeskarType
import ujson
from json import JSONEncoder as json_JSONEncoder
from sanic import Sanic, Request
import pendulum
from sanic_beskar.constants import RESERVED_CLAIMS
from sanic_beskar.exceptions import (BeskarError, ConfigurationError)
class JSONEncoder(json_JSONEncoder):  # pragma: no cover
    """JSON encoder aware of common ODM/datetime objects.

    Checks, in order: an object's own ``__json__`` hook, iterables,
    datetimes, ObjectIds, mapping-like objects, and finally a generic
    attribute dump; anything else is delegated to the stdlib encoder.
    """

    def default(self, o: Any) -> Any:
        if hasattr(o, '__json__'):
            return o.__json__()
        if isinstance(o, Iterable):
            # NOTE(review): this branch precedes the mapping-like branch
            # below, so iterable mappings reaching default() would be
            # serialized as a list of keys — confirm that is intended.
            return list(o)
        if isinstance(o, dt.datetime):
            return o.isoformat()
        if isinstance(o, ObjectId):
            return str(o)
        if hasattr(o, '__getitem__') and hasattr(o, 'keys'):
            return dict(o)
        if hasattr(o, '__dict__'):
            # Generic object: dump public, non-callable attributes.
            return {member: getattr(o, member)
                    for member in dir(o)
                    if not member.startswith('_') and
                    not hasattr(getattr(o, member), '__call__')}
        # Bug fix: the original fallback called ``JSONEncoder.default``,
        # i.e. this very method, recursing forever on unserializable
        # objects. Delegate to the stdlib base class instead, which raises
        # a proper ``TypeError``.
        return super().default(o)
def get_request(request: Request) -> Request:
    """Return the given request, or fall back to Sanic's current request.

    :param request: a request object, or a falsy value to trigger lookup.
    :raises BeskarError: if no current request can be determined.
    """
    try:
        if not request:
            return Request.get_current()
        return request
    except Exception as exc:
        # Chain the original failure so the root cause is not hidden
        # from the traceback.
        raise BeskarError("Could not identify current Sanic request") from exc
def normalize_rbac(rbac_dump: dict) -> dict:
    """
    Normalize an RBAC dump into something usable.

    Yes, I know this will produce duplicates in the role lists of a permission,
    but its much faster than dealing with a set, so we don't care.

    Example:
        {'rolename': ['read', 'write', 'update'],}

    Produces:
        {'read': ['rolename'], 'write': ['rolename'], 'update': ['rolename']}

    Args:
        rbac_dump (dict): RBAC dump from config/storage.

    Returns:
        dict: Normalized (for our purposes) RBAC policy.
    """
    inverted: dict = {}
    # Invert role -> permissions into permission -> roles.
    for role, permissions in rbac_dump.items():
        for permission in permissions:
            inverted.setdefault(permission, []).append(role)
    return inverted
async def is_valid_json(data: str) -> Any:
    """
    Simple helper to validate if a value is valid json data

    :param data: Data to validate for valid JSON
    :type data: str
    :returns: the parsed JSON value on success, ``False`` on parse failure
        (despite the name, this is not a strict bool).
        NOTE(review): a valid-but-falsy document (``"0"``, ``"[]"``,
        ``"false"``) is indistinguishable from failure for callers that
        only truth-test the result — confirm callers are aware.
    :rtype: Any
    """
    try:
        return ujson.loads(data)
    except (ValueError, TypeError):
        return False
def duration_from_string(text: str) -> pendulum.Duration:
    """
    Parses a duration from a string. String may look like these patterns:
    * 1 Hour
    * 7 days, 45 minutes
    * 1y11d20m

    An exception will be raised if the text cannot be parsed

    :param text: String to parse for duration detail
    :type text: str
    :returns: Time Object
    :rtype: :py:mod:`pendulum`
    :raises: :py:exc:`~sanic_beskar.ConfigurationError` on bad strings
    """
    # Strip whitespace/commas and lowercase so the regex stays simple.
    text = text.replace(' ', '').replace(',', '').lower()
    matched = re.match(
        r'''
        ((?P<years>\d+)y[a-z]*)?
        ((?P<months>\d+)mo[a-z]*)?
        ((?P<days>\d+)d[a-z]*)?
        ((?P<hours>\d+)h[a-z]*)?
        ((?P<minutes>\d+)m[a-z]*)?
        ((?P<seconds>\d+)s[a-z]*)?
        ''',
        text,
        re.VERBOSE,
    )
    ConfigurationError.require_condition(
        matched,
        f"Couldn't parse {text}",
    )
    groups = matched.groupdict()  # type: ignore
    # Keep only the units that actually matched, as ints.
    fields = {unit: int(value) for (unit, value) in groups.items() if value}
    ConfigurationError.require_condition(
        fields,
        f"Couldn't parse {text}",
    )
    with ConfigurationError.handle_errors(f"Couldn't parse {text}"):
        return pendulum.duration(**fields)
@functools.lru_cache(maxsize=None)
def current_guard(ctx: Union[Sanic, SimpleNamespace, None] = None) -> 'BeskarType':
    """
    Fetches the current instance of :py:class:`~sanic_beskar.Beskar`
    that is attached to the current sanic app

    :param ctx: Application Context
    :type ctx: Optional[:py:class:`sanic.Sanic`]
    :returns: Current Beskar Guard object for this app context
    :rtype: :py:class:`~sanic_beskar.Beskar`
    :raises: :py:exc:`~sanic_beskar.BeskarError` if no guard found
    """
    # NOTE(review): results are memoized per ``ctx`` argument for the life
    # of the process; the unbounded cache also keeps every ctx object
    # alive, and a guard re-initialized after first lookup will not be
    # picked up — confirm this caching is intended.
    if isinstance(ctx, Sanic):
        # Unwrap a Sanic app into its context namespace.
        ctx = getattr(ctx, 'ctx')
    if not ctx:
        ctx = Sanic.get_app().ctx
    guard: BeskarType = ctx.extensions.get('beskar', None)  # type: ignore
    BeskarError.require_condition(
        guard is not None,
        "No current guard found; Beskar must be initialized first",
    )
    return guard
def app_context_has_token_data(ctx: Optional[Sanic] = None) -> bool:
    """
    Checks if there is already token_data added to the app context

    :param ctx: Application Context
    :type ctx: Optional[Sanic]
    :returns: ``True``, ``False``
    :rtype: bool
    """
    # Fall back to the current app's context when none is supplied.
    target = ctx if ctx else Sanic.get_app().ctx
    return hasattr(target, 'token_data')
def add_token_data_to_app_context(token_data: dict) -> None:
    """
    Adds a dictionary of token data (presumably unpacked from a token) to the
    top of the sanic app's context

    :param token_data: ``dict`` of token data to add
    :type token_data: dict
    """
    Sanic.get_app().ctx.token_data = token_data
def get_token_data_from_app_context() -> dict:
    """
    Fetches a dict of token data from the top of the sanic app's context

    :returns: Token ``dict`` found in current app context
    :rtype: dict
    :raises: :py:exc:`~sanic_beskar.BeskarError` on missing token
    """
    ctx = Sanic.get_app().ctx
    token_data = getattr(ctx, 'token_data', {})
    # Bug fix: the original guard used ``token_data is not {}``, an
    # identity comparison against a fresh dict literal that is *always*
    # True, so a missing token never raised. Truth-test the dict instead.
    BeskarError.require_condition(
        bool(token_data),
        """
        No token_data found in app context.
        Make sure @auth_required decorator is specified *first* for route
        """,
    )
    return token_data
def remove_token_data_from_app_context() -> None:
    """
    Removes the dict of token data from the top of the sanic app's context
    """
    app_ctx = Sanic.get_app().ctx
    # Deleting an absent attribute would raise, so check first.
    if app_context_has_token_data(app_ctx):
        delattr(app_ctx, 'token_data')
def current_user_id() -> Union[str, None]:
    """
    This method returns the user id retrieved from token data attached to
    the current sanic app's context

    :returns: ``id`` of current :py:class:`User`, if any
    :rtype: str
    :raises: :py:exc:`~sanic_beskar.BeskarError` if no user/token found
    """
    uid: str = get_token_data_from_app_context().get('id', None)
    BeskarError.require_condition(
        uid is not None,
        "Could not fetch an id for the current user",
    )
    return uid
async def generate_totp_qr(user_totp: str) -> 'QRCode':
    """
    This is a helper utility to generate a :py:mod:`segno`
    QR code renderer, based upon a supplied `User` TOTP value.

    :param user_totp: TOTP configuration of the user
    :type user_totp: json
    :returns: ``Segno`` object based upon user's stored TOTP configuration
    :rtype: :py:class:`Segno`
    :raises: :py:exc:`~sanic_beskar.ConfigurationError` when the optional
        ``segno`` dependency is not installed.
    """
    # ``segno`` is an optional dependency, imported lazily so the rest of
    # the package works without it.
    try:  # pragma: no cover
        import segno
    except (ModuleNotFoundError, ImportError) as e:  # pragma: no cover
        raise ConfigurationError("Attempting to generate a TOTP QR code,"
                                 "but you didn't install the necessary `segno` library!") from e
    return segno.make(user_totp)
async def current_user() -> Any:
    """
    Resolve and return the user instance for the token data attached to
    the current sanic app's context.

    :returns: Current logged in ``User`` object
    :rtype: populated :py:attr:`user_class` attribute of the logged in :py:class:`~sanic_beskar.Beskar` instance
    :raises: :py:exc:`~sanic_beskar.BeskarError` if no user identified
    """
    uid = current_user_id()
    guard = current_guard()
    the_user = await guard.user_class.identify(uid)
    BeskarError.require_condition(
        the_user is not None,
        "Could not identify the current user from the current id",
    )
    return the_user
async def current_rolenames() -> set:
    """
    Return the names of all roles associated with the current user.

    :returns: Set of roles for currently logged in users
    :rtype: set
    """
    data = get_token_data_from_app_context()
    if 'rls' not in data:
        # Sentinel that can never match a real role, so set arithmetic
        # (subset checks) still behaves correctly.
        return {'non-empty-but-definitely-not-matching-subset'}
    return {role.strip() for role in data['rls'].split(',')}
def current_custom_claims() -> dict:
    """
    Return any custom (non-reserved) claims present in the current token.

    :returns: Custom claims for currently logged in user
    :rtype: dict
    """
    data = get_token_data_from_app_context()
    return {
        claim: value
        for claim, value in data.items()
        if claim not in RESERVED_CLAIMS
    }
|
/sanic_beskar-2.2.11-py3-none-any.whl/sanic_beskar/utilities.py
| 0.795618 | 0.203035 |
utilities.py
|
pypi
|
from buzz import Buzz
from sanic.exceptions import SanicException
from sanic import json
from sanic.response import JSONResponse
class BeskarError(SanicException, Buzz):
    """
    Provides a custom exception class for sanic-beskar based on py-buzz.

    `py-buzz on github <https://github.com/dusktreader/py-buzz>`_
    """
    # Default HTTP status code; subclasses override this (e.g. 403, 425).
    status: int = 401
    def __init__(self, message: str, *args: tuple, **kwargs: dict):
        """
        Build the exception and a ready-to-return Sanic JSON response.

        :param message: Human-readable description of the failure.
        """
        # Copies the class-level status onto the instance so per-instance
        # mutation cannot affect the class default.
        self.status: int = self.status
        self.message: str = f'{self.__class__.__name__}: {message}'
        self.extra_args: tuple = args
        self.extra_kwargs: dict = kwargs
        # Pre-built JSONResponse carrying the raw message, exception class
        # name, and status — handlers can return it directly.
        self.json_response: JSONResponse = json({
            "error": message,
            "data": self.__class__.__name__,
            "status": self.status,
        },
            status=self.status)
        super().__init__(self.message, self.status)
    def __str__(self) -> str:
        # Append the HTTP status to the SanicException string form.
        return f"{super().__str__()} ({self.status})"
class MissingClaimError(BeskarError):
    """
    The token is missing a required claim
    """
    pass
class BlacklistedError(BeskarError):
    """
    The token has been blacklisted and may not be used any more
    """
    # 403 Forbidden: the token is recognized but explicitly rejected.
    status = 403
class ExpiredAccessError(BeskarError):
    """
    The token has expired for access and must be refreshed
    """
    pass
class EarlyRefreshError(BeskarError):
    """
    The token has not yet expired for access and may not be refreshed
    """
    status = 425  # HTTP Status Code : 425 Too Early
class ExpiredRefreshError(BeskarError):
    """
    The token has expired for refresh. An entirely new token must be issued
    """
    pass
class MissingToken(BeskarError):
    """
    The header is missing the required token
    """
    pass
class InvalidTokenHeader(BeskarError):
    """
    The token contained in the header is invalid
    """
    pass
class VerifyError(InvalidTokenHeader):
    """
    Verification of the token contained in the header failed
    """
    pass
class InvalidUserError(BeskarError):
    """
    The user is no longer valid and is now not authorized
    """
    status = 403
class MissingRoleError(BeskarError):
    """
    The token is missing a required role
    """
    status = 403
class MissingRightError(BeskarError):
    """
    The token is missing a required right based upon role breakdown
    """
    status = 403
class MissingUserError(BeskarError):
    """
    The user could not be identified
    """
    pass
class AuthenticationError(BeskarError):
    """
    The entered user's password did not match the stored password
    """
    pass
class ClaimCollisionError(BeskarError):
    """
    Custom claims to pack into the payload collide with reserved claims
    """
    pass
class LegacyScheme(BeskarError):
    """
    The processed hash is using an outdated scheme
    """
    pass
class InvalidResetToken(BeskarError):
    """
    The supplied reset token is invalid
    """
    pass
class InvalidRegistrationToken(BeskarError):
    """
    The supplied registration token is invalid
    """
    pass
class MisusedRegistrationToken(BeskarError):
    """
    Attempted to use a registration token for normal access
    """
    pass
class MisusedResetToken(BeskarError):
    """
    Attempted to use a password reset token for normal access
    """
    pass
class ConfigurationError(BeskarError):
    """
    There was a problem with the configuration
    """
    pass
class TOTPRequired(AuthenticationError):
    """
    The user requires TOTP authentication, per configuration
    `BESKAR_TOTP_ENFORCE` which was not performed
    by this call to `authenticate()`. A call to
    `authenticate_totp()` should be performed separately,
    or a call to `authenticate()` again, but providing the
    users `token` value should be done.
    """
    pass
|
/sanic_beskar-2.2.11-py3-none-any.whl/sanic_beskar/exceptions.py
| 0.772144 | 0.248722 |
exceptions.py
|
pypi
|
from typing import Optional
from bson.objectid import ObjectId
# umongo is missing type hints at this time
from umongo.exceptions import NotCreatedError # type: ignore
from umongo import Document # type: ignore
class UmongoUserMixin(Document):
    """
    A short-cut providing required methods and attributes for a user class
    implemented with `uMongo <https://github.com/Scille/umongo/blob/master/docs/index.rst>`_
    + `Motor(async) <https://github.com/mongodb/motor/>`_. Makes many assumptions about
    how the class is defined.

    **ASSUMPTIONS**

    * The model has an ``id`` column that uniquely identifies each instance
    * The model has a ``rolenames`` column that contains the roles for the
      user instance as a comma separated list of roles
    * The model has a ``username`` column that is a unique string for each instance
    * The model has a ``password`` column that contains its hashed password
    """
    @property
    def rolenames(self) -> Optional[list]:
        """
        *Required Attribute or Property*

        List of role name strings attached to this user, parsed from the
        comma-separated ``roles`` field. Returns an empty list when the
        field is absent or unparsable.

        :returns: Provided :py:class:`User`'s current ``roles``
        :rtype: list
        """
        try:
            role_csv = self.roles  # type: ignore
            return role_csv.split(",")
        except Exception:
            return []
    @classmethod
    async def lookup(cls, username: Optional[str] = None, email: Optional[str] = None) -> Optional[Document]:
        """
        *Required Method*

        Find a user by ``username`` (preferred when both are given) or
        ``email``; returns ``None`` when no argument is supplied or no
        matching document exists.

        :param username: `username` of the user to lookup
        :type username: Optional[str]
        :param email: `email` of the user to lookup
        :type email: Optional[str]
        :returns: ``None`` or :py:class:`User` of the found user
        :rtype: :py:class:`User`, None
        """
        if username:
            criteria = {'username': username}
        elif email:
            criteria = {'email': email}
        else:
            return None
        try:
            return await cls.find_one(criteria)
        except NotCreatedError:
            return None
    @classmethod
    async def identify(cls, id: str) -> Optional[Document]:
        """
        *Required Method*

        Find a user by its unique ``id``; returns ``None`` when no match
        exists.

        :param id: string form of the user's ObjectId
        :returns: matching :py:class:`User` document or ``None``
        :rtype: :py:class:`User`, None
        """
        try:
            # NOTE(review): queries the 'id' key; depending on the umongo
            # template the underlying mongo field may be '_id' — confirm.
            return await cls.find_one({'id': ObjectId(id)})
        except NotCreatedError:
            return None
    @property
    def identity(self) -> str:
        """
        *Required Attribute or Property*

        Unique id of the user instance, rendered as a string because
        Mongo's :py:func:`~bson.objectid.ObjectId()` is not JSON
        serializable by default.

        :returns: Provided :py:class:`User` id
        :rtype: str
        """
        return str(self.id)
|
/sanic_beskar-2.2.11-py3-none-any.whl/sanic_beskar/orm/umongo_user_mixins.py
| 0.945601 | 0.414899 |
umongo_user_mixins.py
|
pypi
|
from typing import Optional, Union
from bson.objectid import ObjectId
from beanie.exceptions import DocumentNotFound
from beanie import Document, PydanticObjectId
class BeanieUserMixin(Document):
    """
    A short-cut providing required methods and attributes for a user class
    implemented with `beanie <https://beanie-odm.dev/>`_. Makes
    many assumptions about how the class is defined.

    ASSUMPTIONS:

    * The model has an ``id`` column that uniquely identifies each instance
    * The model has a ``rolenames`` column that contains the roles for the
      user instance as a comma separated list of roles
    * The model has a ``username`` column that is a unique string for each instance
    * The model has a ``password`` column that contains its hashed password
    """
    @property
    def identity(self) -> Optional[PydanticObjectId]:
        """
        *Required Attribute or Property*

        sanic-beskar requires that the user class has an :py:meth:`identity`
        instance attribute or property that provides the unique id of the user
        instance

        :returns: Provided :py:class:`User.id`
        :rtype: Optional[PydanticObjectId]
        """
        return self.id
    @property
    def rolenames(self) -> list:
        """
        *Required Attribute or Property*

        sanic-beskar requires that the user class has a
        :py:attr:`rolenames` instance attribute or property that
        provides a list of strings that describe the roles attached to
        the user instance.

        Parsed from the comma-separated ``roles`` field; returns an empty
        list when the field is absent or unparsable.

        :returns: Provided :py:class:`User`'s current ``roles``
        :rtype: list
        """
        try:
            _roles: list = self.roles.split(",")
            return _roles
        except Exception:
            return []
    @classmethod
    async def lookup(cls, username: Optional[str] = None, email: Optional[str] = None) -> Optional[Document]:
        """
        *Required Method*

        sanic-beskar requires that the user class implements a :py:meth:`lookup()`
        class method that takes a single :py:data:`username` or :py:data:`email`
        argument and returns a user instance if there is one that matches or
        ``None`` if there is not. ``username`` wins when both are supplied.

        :param username: `username` of the user to lookup
        :type username: Optional[str]
        :param email: `email` of the user to lookup
        :type email: Optional[str]
        :returns: ``None`` or :py:class:`User` of the found user
        :rtype: Optional[Document]
        """
        try:
            if username:
                return await cls.find({'username': username}).first_or_none()
            if email:
                return await cls.find({'email': email}).first_or_none()
            return None
        except DocumentNotFound:
            return None
    @classmethod
    async def identify(cls, id: str) -> Optional[Document]:
        """
        *Required Method*

        sanic-beskar requires that the user class implements an
        :py:meth:`identify()` class method that takes a single
        :py:data:`id` argument and returns user instance if
        there is one that matches or ``None`` if there is not.

        :param id: string form of the user's ObjectId
        :returns: matching :py:class:`User` document or ``None``
        :rtype: Optional[Document]
        """
        try:
            return await cls.find({'_id': ObjectId(id)}).first_or_none()
        except DocumentNotFound:
            return None
|
/sanic_beskar-2.2.11-py3-none-any.whl/sanic_beskar/orm/beanie_user_mixins.py
| 0.948119 | 0.564699 |
beanie_user_mixins.py
|
pypi
|
from typing import Optional, Union
from bson.objectid import ObjectId
from tortoise.exceptions import DoesNotExist
from tortoise.models import Model
class TortoiseUserMixin(Model):
    """
    A short-cut providing required methods and attributes for a user class
    implemented with `tortoise-orm <https://tortoise.github.io/>`_. Makes
    many assumptions about how the class is defined.

    ASSUMPTIONS:

    * The model has an ``id`` column that uniquely identifies each instance
    * The model has a ``rolenames`` column that contains the roles for the
      user instance as a comma separated list of roles
    * The model has a ``username`` column that is a unique string for each instance
    * The model has a ``password`` column that contains its hashed password
    """
    @property
    def identity(self) -> Union[str, ObjectId]:
        """
        *Required Attribute or Property*

        Unique id of the user instance, as stored on the model's ``id``
        field.

        :returns: Provided :py:class:`User.id`
        :rtype: str
        """
        return getattr(self, 'id')  # type: ignore
    @property
    def rolenames(self) -> Optional[list]:
        """
        *Required Attribute or Property*

        List of role name strings attached to this user, parsed from the
        comma-separated ``roles`` field. Returns an empty list when the
        field is absent or unparsable.

        :returns: Provided :py:class:`User`'s current ``roles``
        :rtype: list
        """
        try:
            csv_roles = getattr(self, 'roles')
            return csv_roles.split(",")
        except Exception:
            return []
    @classmethod
    async def lookup(cls, username: Optional[str] = None, email: Optional[str] = None) -> Optional[object]:
        """
        *Required Method*

        Find a user by ``username`` (preferred when both are given) or
        ``email``; returns ``None`` when no argument is supplied or no
        matching row exists.

        :param username: `username` of the user to lookup
        :type username: Optional[str]
        :param email: `email` of the user to lookup
        :type email: Optional[str]
        :returns: ``None`` or :py:class:`User` of the found user
        :rtype: :py:class:`User`
        """
        if not (username or email):
            return None
        try:
            if username:
                return await cls.filter(username=username).get()
            return await cls.filter(email=email).get()
        except DoesNotExist:
            return None
    @classmethod
    async def identify(cls, id: ObjectId) -> Optional[object]:
        """
        *Required Method*

        Find a user by its unique ``id``; returns ``None`` when no match
        exists.

        :param id: id of the user to fetch
        :returns: matching :py:class:`User` instance or ``None``
        :rtype: :py:class:`User`, None
        """
        try:
            return await cls.filter(id=id).get()
        except DoesNotExist:
            return None
|
/sanic_beskar-2.2.11-py3-none-any.whl/sanic_beskar/orm/tortoise_user_mixins.py
| 0.948799 | 0.525247 |
tortoise_user_mixins.py
|
pypi
|
<p align="center">
<img src="https://upload.wikimedia.org/wikipedia/commons/7/70/Cookie.png" alt="Logo" width="250" height="250"/>
<p align="center">
<a href="https://github.com/omarryhan/sanic-cookies"><img alt="Software License" src="https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square" /></a>
<a href="https://travis-ci.org/omarryhan/sanic-cookies"><img alt="Build Status" src="https://travis-ci.org/omarryhan/sanic-cookies.svg?branch=master" /></a>
<a href="https://github.com/python/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg" /></a>
<a href="https://pepy.tech/badge/sanic-cookies"><img alt="Downloads" src="https://pepy.tech/badge/sanic-cookies" /></a>
<a href="https://pepy.tech/badge/sanic-cookies/month"><img alt="Monthly Downloads" src="https://pepy.tech/badge/sanic-cookies/month" /></a>
</p>
</p>
# Sanic Cookies
Much of the code here is borrowed from [sanic_session](https://github.com/xen/sanic_session).
I wanted to make some changes that would break a big part of `sanic_session`'s API, so I decided to create this repo instead.
Sanic cookies supports both client side and server side cookies.
## Main deviations from sanic_session are
- **Interfaces are only responsible for reading/writing the `SessionDict`:**
Session management logic is handled by the `Session` object
- **No race conditions**:
*By using:*
```python 3.7
async with request['session']:
request['session']['foo'] = 'bar'
```
*instead of:*
```python 3.7
request['session']['foo'] = 'bar'
```
It is still however possible to use the `session_dict` without a context manager, but it will raise some warnings,
unless it's explicitly turned off (warn_lock=False)
**Note:**
The locking mechanism used here only keeps track of locks on a thread-level, which means, an application that is horizontally scaled or one that runs on more than one process won't fully benefit from the locking mechanism that sanic-cookies currently has in place and might encounter some race conditions.
I have plans to introduce a distributed locking mechanism. Probably using something like: [Aioredlock](https://github.com/joanvila/aioredlock).
But for now, you should know that the locking mechanism that is currently in place will not work in a multi-process environment.
- **A simpler implementation of SessionDict that helps me sleep in peace at night. (Probably less performant)**
- **In memory interface schedules cleanup to avoid running out of memory**
- **Encrypted client side cookie interface**
- **Ability to add more than one interface to the same session**
- **Authenticated Session implementation**
## Setup ⚙️
```bash
$ pip install sanic_cookies
```
## Quick Start
```python 3.7
from sanic_cookies import Session, InMemory
from sanic import Sanic
app = Sanic()
Session(app, master_interface=InMemory())
@app.route('/')
async def handler(request):
async with request['session'] as sess:
sess['foo'] = 'bar'
```
## Usage
### Running multiple interfaces
```python 3.7
from sanic_cookies import Session, InMemory, Aioredis
from sanic import Sanic
inmem = InMemory()
aioredis = AioRedis(aioredis_pool_instance)
app = Sanic()
sess = Session(app, master_interface=inmem, session_name='my_1st_sess')
sess.add_interface(aioredis)
@app.route('/')
async def index(request):
async with request['my_1st_session'] as sess:
sess['foo'] = 'bar'
# At this point 'foo' = 'bar' is written both to the inmemory
# interface and the aioredis interface
async with request['my_1st_session'] as sess:
# When reading, your session will always read from the "master_interface"
# In that case it's the inmem interface
assert sess['foo'] == 'bar'
# Such pattern can be useful in many cases
# e.g. you want to share your session information with an analytics team
```
### Running multiple sessions
```python 3.7
from sanic_cookies import Session, AuthSession, InMemory, InCookieEncrypted, AioRedis
from sanic import Sanic
inmem = InMemory()
aioredis = Aioredis(aioredis_pool_instance)
incookie = InCookieEncrypted(b'fernetsecretkey')
app = Sanic()
incookie_session = Session(
app,
master_interface=incookie,
session_name='incookiesess',
cookie_name='INCOOKIE'
)
generic_session = Session(
app,
master_interface=inmem,
session_name='session',
cookie_name='SESSION'
)
auth_session = AuthSession(
app,
master_interface=aioredis,
session_name='auth_session',
cookie_name='SECURE_SESSION'
)
# for production (HTTPs) set `secure=True` in your auth_session,
# but this will fail in local development
@app.route('/')
async def index(request):
async with request['incookie_session'] as sess:
sess['foo'] = 'bar'
async with request['session'] as sess:
sess['bar'] = 'baz'
async with request['auth_session'] as sess:
sess['baz'] = 'foo'
```
### AuthSession
Following up on the previous example:
```python 3.7
from sanic_cookies import login_required
@app.route('/login')
async def login(request):
# 1. User verification logic
# both will work (whatever is JSON serializable will)
# If you want to pickle an object simply change the default
# encoder&decoder in the interfaces plugged in to your AuthSession
authorized_user = 123
authorized_user = {'user_id': 123, 'email': '[email protected]'}
# 2. Login user
# Here we access the session object
# (not the session dict that is accessible from the request) from the app
await request.app.exts.auth_session.login_user(request, authorized_user)
# 3. Use the session dict safely and exclusively for the logged in user
async with request['auth_session'] as sess:
sess['foo'] = 'bar'
current_user = sess['current_user']
assert current_user == await request.app.exts.auth_session.current_user()
@app.route('/logout')
async def logout(request):
async with request['auth_session'] as sess:
assert sess['foo'] == 'bar' # From before
await request.app.exts.auth_session.logout_user(request) # Resets the session
async with request['auth_session'] as sess:
assert sess.get('foo') is None # should never fail
assert sess.get('current_user') is None # should never fail
@app.route('/protected')
@login_required()
async def protected(request):
assert await request.app.exts.auth_session.current_user() is not None # should never fail
```
## Interfaces available
1. In memory
``` python 3.7
from sanic_cookies import Session, InMemory
from sanic import Sanic
interface = InMemory()
app = Sanic()
Session(app, master_interface=interface)
# You can skip this part if you don't want scheduled interface cleanup
@app.listener('before_server_start')
def init_inmemory(app, loop):
interface.init()
@app.listener('after_server_stop')
def kill_inmemory(app, loop):
interface.kill()
@app.route('/')
async def handler(request):
async with request['session'] as sess:
sess['foo'] = 'bar'
```
2. Aioredis
```python 3.7
from aioredis import Aioredis
from sanic_cookies import Aioredis as AioredisInterface
from sanic import Sanic
app = Sanic()
aioredis_pool_instance = Aioredis()
aioredis = AioredisInterface(aioredis_pool_instance)
Session(app, master_interface=interface)
@app.route('/')
async def handler(request):
async with request['session'] as sess:
sess['foo'] = 'bar'
```
3. Encrypted in-cookie (using the amazing cryptography.Fernet library)
i. Open a Python terminal and generate a new Fernet key:
```python 3.7
>>> from cryptography.fernet import Fernet
>>> SESSION_KEY = Fernet.generate_key()
>>> print(SESSION_KEY)
b'copy me to your sanic app and keep me really secure'
```
ii. Write your app
```python 3.7
from sanic import Sanic
from sanic_cookies import Session, InCookieEncrypted
app = Sanic()
app.config.SESSION_KEY = SESSION_KEY
Session(
app,
master_interface=InCookieEncrypted(app.config.SESSION_KEY),
)
@app.route('/')
async def handler(request):
async with request['session'] as sess:
sess['foo'] = 'bar'
```
4. Gino-AsyncPG (Postgres 9.5+):
i. Manually create a table:
```sql
CREATE TABLE IF NOT EXISTS sessions
(
created_at timestamp without time zone NOT NULL,
expires_at timestamp without time zone,
sid character varying,
val character varying,
CONSTRAINT sessions_pkey PRIMARY KEY (sid)
);
```
ii. Add the interface:
```python 3.7
from sanic import Sanic
from gino.ext.sanic import Gino
from sanic_cookies import GinoAsyncPG
from something_secure import DB_SETTINGS
app = Sanic()
app.config.update(DB_SETTINGS)
db = Gino()
db.init_app(app)
interface = GinoAsyncPG(client=db)
auth_session = AuthSession(app, master_interface=interface)
if __name__ == '__main__':
app.run(host='127.0.0.1', port='8080')
```
## Sessions available
1. Session (A generic session interface)
2. AuthSession (A session interface with login_user, logout_user, current_user logic)
## Other pluggable parts
1. Encoders and Decoders (Default to ujson)
2. SID factory (Default to uuid.uuid4)
3. Session dict implementation
## Contact 📧
I currently work as a freelance software developer. Like my work and got a gig for me?
Want to hire me fulltime? Send me an email @ [email protected]
## Buy me a coffee ☕
**Bitcoin:** 3NmywNKr1Lzo8gyNXFUnzvboziACpEa31z
**Ethereum:** 0x1E1400C31Cd813685FE0f6D29E0F91c1Da4675aE
**Bitcoin Cash:** qqzn7rsav6hr3zqcp4829s48hvsvjat4zq7j42wkxd
**Litecoin:** MB5M3cE3jE4E8NwGCWoFjLvGqjDqPyyEJp
**Paypal:** https://paypal.me/omarryhan
|
/sanic_cookies-0.4.3.tar.gz/sanic_cookies-0.4.3/README.md
| 0.683208 | 0.931649 |
README.md
|
pypi
|
import traceback
from math import ceil
import datetime
from playhouse.shortcuts import model_to_dict
from sanic.log import log
from ..resources.base_resource import BaseResource
def collection_filter(func):
    """
    Decorator for collection handlers that translates query-string
    arguments into a peewee ``SELECT`` query.

    Filter args arrive as ``field`` (equality) or ``field__comparison``
    keys; ``foreign_keys``, ``backrefs`` and ``page`` are reserved flags
    and are skipped. The built query is handed to the wrapped handler as
    ``kwargs['filtered_results']``.
    """
    def wrapped(self, request, *args, **kwargs):
        model = self.model
        shortcuts = model.shortcuts
        fields = shortcuts.fields
        response_messages = self.config.response_messages
        query = model.select()
        # Iterate over args and split the filters
        for key, value in request.args.items():
            # skip over include foreign_keys / backrefs / pagination flags
            if key in ('foreign_keys', 'backrefs', 'page'):
                continue
            filter_parts = key.split('__')
            field = filter_parts[0]
            comparison = '='
            value = value[0]  # Value comes in as an array with a single argument? TODO: Re-evaluate this!
            # If the length is 2, then there is a filter component
            if len(filter_parts) == 2:
                comparison = filter_parts[1]
                # Validate that a supported comparison is used
                if comparison not in self.config.FILTER_OPTIONS:
                    # BUGFIX: the message previously interpolated
                    # ``shortcuts.FILTER_OPTIONS`` although validation is
                    # performed against ``self.config.FILTER_OPTIONS``.
                    return self.response_json(
                        status_code=400,
                        message=response_messages.ErrorInvalidFilterOption.format(
                            comparison, self.config.FILTER_OPTIONS))
            # Validate that the field is part of the table
            if field not in fields:
                return self.response_json(
                    status_code=400,
                    message=response_messages.ErrorInvalidField.format(key, fields.keys()))
            # (removed leftover debug statement ``log.error(value)``)
            # Normalize to a list: 'in'/'notin' take comma separated values.
            if comparison in ['in', 'notin']:
                value = value.split(',')
            else:
                value = [value]
            # 'null' needs no type validation — its operand is a boolean flag.
            if comparison != 'null':
                for item in value:
                    field_type_invalid = _validate_field_type(self, fields.get(field), item)
                    if field_type_invalid:
                        return field_type_invalid
            model_field = getattr(model, field)
            # Build the query from comparisons
            if comparison == '=':
                # NOTE(review): value is a single-element list here; peewee
                # adapts it, but comparing against value[0] may be intended.
                query = query.where(model_field == value)
            elif comparison == 'null':
                # BUGFIX: value is a one-element list of strings at this point,
                # so the old ``value == 1`` test was always False and
                # ``field__null=1`` silently meant IS NOT NULL. Compare the
                # string flag instead.
                query = query.where(model_field.is_null(value[0] == '1'))
            elif comparison == 'startswith':
                query = query.where(model_field.startswith(value))
            elif comparison == 'contains':
                query = query.where(model_field.contains(value))
            elif comparison == 'lt':
                query = query.where(model_field < value)
            elif comparison == 'lte':
                query = query.where(model_field <= value)
            elif comparison == 'gt':
                query = query.where(model_field > value)
            elif comparison == 'gte':
                query = query.where(model_field >= value)
            elif comparison == 'in':
                query = query.where(model_field << value)
            elif comparison == 'notin':
                query = query.where(~(model_field << value))
        kwargs['filtered_results'] = query
        return func(self, request, *args, **kwargs)
    return wrapped
# Helper function, takes in a database field and an input value to make sure the input is the correct type for the db
def _validate_field_type(self, field, value):
expected_field_type = field.db_field
response_messages = self.config.response_messages
if expected_field_type in ['int', 'bool']:
try:
int(value)
except (ValueError, TypeError):
return self.response_json(status_code=400,
message=response_messages.ErrorTypeInteger.format(value) if expected_field_type == 'int' else response_messages.ErrorTypeBoolean.format(value))
elif expected_field_type == 'datetime':
try:
int(value)
except Exception:
try:
datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
except (ValueError, TypeError):
return self.response_json(status_code=400,
message=response_messages.ErrorTypeDatetime.format(value))
return False
# Resource for multiple objects
# Resource for multiple objects
class BaseCollectionResource(BaseResource):
    """
    REST resource for collections of a model: filtered/paginated listing
    (GET) and row creation (POST).
    """
    @collection_filter
    async def get(self, request, **kwargs):
        """
        List records matching the query-string filters, paginated.

        Recognized flags: ``page`` (int, default 1), ``backrefs=true``
        (implies foreign keys too), ``foreign_keys=true``. The filtered
        peewee query is supplied by ``@collection_filter`` via
        ``kwargs['filtered_results']``.
        """
        try:
            response_messages = self.config.response_messages
            # Verify page is an int
            try:
                page = int(request.args.get('page', 1))
            except ValueError:
                return self.response_json(status_code=400,
                                          message=response_messages.ErrorTypeInteger.format('page'))
            include_backrefs = False
            include_foreign_keys = False
            # 'backrefs' implies serializing foreign keys as well.
            if 'backrefs' in request.args and request.args['backrefs'][0] == 'true':
                include_backrefs = True
                include_foreign_keys = True
            elif 'foreign_keys' in request.args and request.args['foreign_keys'][0] == 'true':
                include_foreign_keys = True
            results = []
            # Query built by the @collection_filter decorator.
            data = kwargs.get('filtered_results')
            total_records = data.count()
            total_pages = ceil(total_records / self.config.COLLECTION_MAX_RESULTS_PER_PAGE)
            data = data.paginate(page, self.config.COLLECTION_MAX_RESULTS_PER_PAGE)
            for row in data:
                results.append(model_to_dict(row, recurse=include_foreign_keys, backrefs=include_backrefs))
            return self.response_json(data=results,
                                      status_code=200,
                                      message=response_messages.SuccessOk,
                                      page=page,
                                      total_pages=total_pages)
        except Exception as e:
            # NOTE(review): traceback.print_exc() prints to stderr and returns
            # None, so this logs None; traceback.format_exc() was likely meant.
            log.error(traceback.print_exc())
            return self.response_json(message=str(e),
                                      status_code=500
                                      )
    async def post(self, request):
        """
        Create a row from the JSON request body.

        Returns 400 (via ``validate_request``) on invalid input, 200 with
        the serialized new row on success, 500 on database errors.
        """
        valid_request = self.validate_request(request)
        if valid_request is not True:
            return valid_request
        try:
            result = self.model.create(**request.json)
            return self.response_json(data=model_to_dict(result),
                                      status_code=200,
                                      message=self.config.response_messages.SuccessRowCreated.format(result.id)
                                      )
        except Exception as e:
            # NOTE(review): same print_exc()-returns-None issue as in get().
            log.error(traceback.print_exc())
            return self.response_json(message=str(e),
                                      status_code=500
                                      )
|
/sanic_crud-0.2.4.tar.gz/sanic_crud-0.2.4/sanic_crud/resources/collection_resource.py
| 0.474631 | 0.217628 |
collection_resource.py
|
pypi
|
from copy import deepcopy
from inspect import getmro
from typing import Type
from pydantic import BaseModel, ValidationError
from sanic.exceptions import InvalidUsage, SanicException, ServerError
from sanic.log import error_logger
from sanic.request import Request
class ParsedArgsObj(dict):
    """Dict whose keys are also readable/writable as attributes.

    Missing attributes resolve to ``None`` (via ``dict.get``) instead of
    raising :py:exc:`AttributeError`.
    """
    def __getattr__(self, name):
        # Only called when normal attribute lookup fails.
        return self.get(name)
    def __setattr__(self, name, value):
        # Route all attribute writes into the mapping itself.
        self[name] = value
    def __deepcopy__(self, memo=None):
        # Preserve the subclass type when deep-copied.
        return ParsedArgsObj(deepcopy(dict(self), memo=memo))
class DanticModelObj:
    """
    Container validating and holding the pydantic models (and optional
    error class) attached to a route by sanic-dantic.
    """
    def __init__(
            self,
            header: Type[BaseModel] = None,
            query: Type[BaseModel] = None,
            path: Type[BaseModel] = None,
            body: Type[BaseModel] = None,
            form: Type[BaseModel] = None,
            error: Type[SanicException] = None,
    ) -> None:
        """
        The param must be a BaseModel class or must inherit from BaseModel \n
        if listed, the same model name's model will use strict mode

        :param header: model validating request headers
        :param query: model validating query-string args
        :param path: model validating path (match_info) params
        :param body: model validating the JSON body (exclusive with form)
        :param form: model validating form data (exclusive with body)
        :param error: SanicException subclass raised on validation failure
        :raises ServerError: when the arguments are misconfigured
        """
        try:
            # body and form both consume the request payload, so they are
            # mutually exclusive.
            if body and form:
                raise AssertionError(
                    "sanic-dantic: " +
                    "body and form cannot be used at the same time."
                )
            self.items = {
                "header": header,
                "path": path,
                "query": query,
                "form": form,
                "body": body,
                "error": error
            }
            # Each supplied model must derive from pydantic.BaseModel
            # (checked via the MRO so indirect subclasses pass too).
            for model in [header, path, query, form, body]:
                if model and BaseModel not in getmro(model):
                    raise AssertionError(
                        "sanic-dantic: " +
                        "model must inherited from Pydantic.BaseModel"
                    )
            if error and SanicException not in getmro(error):
                raise AssertionError(
                    "sanic-dantic: " +
                    "error must inherited from SanicException"
                )
        except AssertionError as e:
            # Misconfiguration is a programming error: log and surface a 500.
            error_logger.error(e)
            raise ServerError(str(e))
    def __repr__(self):
        return str(self.items)
def validate(
        request: Request,
        header: Type[BaseModel] = None,
        query: Type[BaseModel] = None,
        path: Type[BaseModel] = None,
        body: Type[BaseModel] = None,
        form: Type[BaseModel] = None,
        error: Type[SanicException] = None
) -> ParsedArgsObj:
    """
    Validate the request against the supplied pydantic models and merge the
    parsed values into a single :py:class:`ParsedArgsObj`.

    When there are the same parameter name in the model,
    the parameter in ParsedArgsObj will be overwritten,
    The priorities is: body = form > query > path > header

    :param request: incoming Sanic request
    :param header/query/path/body/form: optional pydantic models per source
    :param error: optional SanicException subclass to raise on failure
    :returns: merged parsed arguments (also stored on ``request.ctx.params``)
    :raises: ``error`` if given, otherwise :py:exc:`InvalidUsage`
    """
    try:
        parsed_args = ParsedArgsObj()
        if header:
            parsed_args.update(header(**request.headers).dict())
        if path:
            parsed_args.update(path(**request.match_info).dict())
        if query:
            # Query args arrive as lists; unwrap single-valued ones.
            params = {
                key: val[0]
                if len(val) == 1 else val for key, val in request.args.items()
            }
            parsed_args.update(query(**params).dict())
        if form:
            form_data = {
                key: val[0]
                if len(val) == 1 else val
                for key, val in request.form.items()
            }
            parsed_args.update(form(**form_data).dict())
        elif body:
            parsed_args.update(body(**request.json).dict())
    except ValidationError as e:
        # Build one "<field> <problem>" message from the first error.
        # (Previously this construction was duplicated in both branches.)
        first_error = e.errors()[0]
        message = f'{first_error.get("loc")[0]} {first_error.get("msg")}'
        if error:
            # error handler function of sanic_dantic > default InvalidUsage
            raise error(message)
        error_logger.error(message)
        raise InvalidUsage(message)
    # (removed a no-op ``except Exception as e: raise e`` clause)
    request.ctx.params = parsed_args
    return parsed_args
|
/sanic_dantic-1.2.2-py3-none-any.whl/sanic_dantic/basic_definition.py
| 0.75037 | 0.194789 |
basic_definition.py
|
pypi
|
from sanic_discord.rest import RestClient
from typing import List
class HttpClient(RestClient):
    """
    Discord http client for oauth2.

    Every helper delegates to ``RestClient.request``; the non-async methods
    return whatever ``request`` produces (awaited by the caller).
    """

    def fetch_user(self, access_token: str) -> dict:
        """
        Fetches the user's profile using an access token.

        Args:
            access_token (str): The access token to use.
        """
        return self.request(
            "GET", "/users/@me",
            headers={"Authorization": f"Bearer {access_token}"}
        )

    def fetch_guilds(self, access_token: str) -> List[dict]:
        """
        Fetches the guilds of the current user using an access token.

        Args:
            access_token (str): The access token to use.
        """
        return self.request(
            "GET", "/users/@me/guilds",
            headers={"Authorization": f"Bearer {access_token}"}
        )

    def exchange_code(
        self, code: str, redirect_uri: str,
        client_id: int, client_secret: str
    ) -> dict:
        """
        Exchanges a code for an access token.

        Args:
            code (str): The code to exchange.
            redirect_uri (str): The redirect URI.
            client_id (int): The client ID.
            client_secret (str): The client secret.
        """
        return self.request("POST", "/oauth2/token", data={
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": redirect_uri,
            "client_id": client_id,
            "client_secret": client_secret
        }, headers={
            "Content-Type": "application/x-www-form-urlencoded"
        })

    def refresh_token(
        self, refresh_token: str, client_id: int, client_secret: str
    ) -> dict:
        """
        Refreshes an access token using a refresh token.

        Args:
            refresh_token (str): The refresh token to use.
            client_id (int): The client ID.
            client_secret (str): The client secret.
        """
        return self.request("POST", "/oauth2/token", data={
            "grant_type": "refresh_token",
            "refresh_token": refresh_token,
            "client_id": client_id,
            "client_secret": client_secret
        }, headers={
            "Content-Type": "application/x-www-form-urlencoded"
        })

    async def add_guild(
        self, guild_id: str, user_id: str, access_token: str
    ) -> None:
        """
        Requests the guild-member endpoint for *user_id* in *guild_id*.

        Args:
            guild_id (str): The guild ID.
            user_id (str): The user ID.
            access_token (str): The access token to use.
        """
        # Fixed: the path previously began with a doubled slash
        # ("//guilds/..."), unlike every other route in this client.
        # NOTE(review): Discord's add-guild-member endpoint is documented as
        # PUT /guilds/{guild.id}/members/{user.id} with a JSON body --
        # confirm whether "GET" + query params here is intentional.
        await self.request(
            "GET", f"/guilds/{guild_id}/members/{user_id}",
            params={"access_token": access_token}
        )
|
/sanic-discord-2.0.1.tar.gz/sanic-discord-2.0.1/sanic_discord/oauth/http.py
| 0.929784 | 0.202778 |
http.py
|
pypi
|
"Oauth2 client for sanic."
from sanic import Sanic, Request, HTTPResponse
from typing import List, Optional, Tuple
from functools import wraps
from urllib import parse
from .errors import OauthException, StateError
from .access_token import AccessToken
from .http import HttpClient
class Oauth2:
    """
    Discord OAuth2 helper bound to a Sanic app; generates authorize URLs
    and exchanges/refreshes tokens through an HttpClient.

    Args:
        app (Sanic): The Sanic app.
        client_id (int): The client ID.
        client_secret (str): The client secret.
        redirect_uri (str): The redirect URI.

    Attributes:
        app (Sanic): The Sanic app.
        client_id (int): The client ID.
        client_secret (str): The client secret.
        redirect_uri (str): The redirect URI.
        http (HttpClient): The client used to make requests.
    """

    def __init__(
        self, app: Sanic, client_id: int, client_secret: str, redirect_uri: str
    ):
        self.app = app
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        self.http = HttpClient()
        # Expose this instance on the app context for handler access.
        self.app.ctx.oauth2: Oauth2 = self

    async def close(self) -> None:
        """
        Closes the client.
        """
        await self.http.close()

    def exchange_code(self, state: bool = False):
        """
        Decorator: exchanges the request's ``code`` for an access token and
        passes it (and the state value, if requested) to the handler.

        Args:
            state (bool): If you use state in oauth url, you must do True.

        Raises:
            StateError: state is invalid.
        """
        def decorator(func):
            @wraps(func)
            async def wrapper(request: Request, *args, **kwargs) -> HTTPResponse:
                results = (await self._exchange_code(request, state)) + args
                return await func(request, *results, **kwargs)
            return wrapper
        return decorator

    async def _exchange_code(
        self, request: Request, state: bool = False
    ) -> Tuple[AccessToken, ...]:
        """
        Exchanges a code for an access token.

        Args:
            request (Request): The request.
            state (bool): If you use state in oauth url, you must do True.

        Raises:
            StateError: state is required but missing.
            OauthException: the ``code`` query argument is missing.
        """
        args = []
        if state:
            if request.args.get("state"):
                args.append(request.args.get("state"))
            else:
                raise StateError("state is required.")
        code = request.args.get("code")
        if code is None:
            # Fixed typo in the error message ("invaild" -> "invalid").
            raise OauthException("code is invalid.")
        # The access token is always the first element, before any state.
        args.insert(0, AccessToken(await self.http.exchange_code(
            code, self.redirect_uri, self.client_id, self.client_secret
        ), self.http))
        return tuple(args)

    async def fetch_user(self, access_token: str) -> dict:
        """
        Fetches the user's profile using an access token.

        Args:
            access_token (str): The access token to use.

        Returns:
            dict: The user's profile.
        """
        return await self.http.fetch_user(access_token)

    async def fetch_guilds(self, access_token: str) -> List[dict]:
        """
        Fetches the user's guilds using an access token.

        Args:
            access_token (str): The access token to use.

        Returns:
            List[dict]: The user's guilds.
        """
        return await self.http.fetch_guilds(access_token)

    async def refresh_token(self, refresh_token: str) -> AccessToken:
        """
        Refreshes an access token using a refresh token.

        Args:
            refresh_token (str): The refresh token to use.

        Returns:
            AccessToken: The new access token.
        """
        return AccessToken(await self.http.refresh_token(
            refresh_token, self.client_id, self.client_secret
        ), self.http)

    def get_authorize_url(
        self, scope: Optional[List[str]] = None, *, state: Optional[str] = None
    ) -> str:
        """
        Generates a URL to authorize the application.

        Args:
            scope (Optional[List[str]]): The scope to request.
                Defaults to ["identify"].
            state (Optional[str]): The state to use. Defaults to None.

        Returns:
            str: The URL to authorize the application.
        """
        # Fixed: previous signature used a mutable default (scope=["identify"]);
        # the default list is now created per call.
        if scope is None:
            scope = ["identify"]
        payload = {
            "client_id": self.client_id,
            "scope": ' '.join(scope),
            "response_type": "code",
            "redirect_uri": self.redirect_uri
        }
        if state is not None:
            payload["state"] = state
        return f"{self.http.BASEURL}/oauth2/authorize?" + parse.urlencode(payload)
|
/sanic-discord-2.0.1.tar.gz/sanic-discord-2.0.1/sanic_discord/oauth/oauth.py
| 0.900232 | 0.154823 |
oauth.py
|
pypi
|
from __future__ import annotations
from typing import Any, Callable, Dict, Optional, Set, Tuple, Type
from sanic.app import Sanic
from sanic.config import Config
from .constructor import Constructor
class InjectionRegistry:
    """Maps an injectable type to the Constructor used to build it."""

    def __init__(self):
        self._registry: Dict[Type, Optional[Callable[..., Any]]] = {}

    def __getitem__(self, key):
        return self._registry[key]

    def __str__(self) -> str:
        return str(self._registry)

    def __contains__(self, other: Any):
        return other in self._registry

    def get(self, key, default=None):
        return self._registry.get(key, default)

    def register(
        self,
        _type: Type,
        constructor: Optional[Callable[..., Any]],
        request_arg: Optional[str] = None,
    ) -> None:
        """Register *_type*, falling back to the type itself as builder."""
        builder = constructor or _type
        self._registry[_type] = Constructor(builder, request_arg=request_arg)

    def finalize(
        self, app: Sanic, constant_registry: ConstantRegistry, allowed_types
    ):
        """Prepare every registered Constructor for runtime injection."""
        for entry in self._registry.values():
            if isinstance(entry, Constructor):
                entry.prepare(app, self, constant_registry, allowed_types)

    @property
    def length(self):
        return len(self._registry)
class SignatureRegistry:
    """Stores, per route name, its injectable dependencies and constants."""

    def __init__(self):
        self._registry: Dict[
            str,
            Tuple[
                Dict[str, Tuple[Type, Optional[Callable[..., Any]]]],
                Dict[str, Any],
            ],
        ] = {}

    def __getitem__(self, key):
        return self._registry[key]

    def __str__(self) -> str:
        return str(self._registry)

    def get(self, key, default=None):
        return self._registry.get(key, default)

    def register(
        self,
        route_name: str,
        dependencies: Dict[str, Tuple[Type, Optional[Callable[..., Any]]]],
        constants: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Record the (dependencies, constants) pair for *route_name*."""
        self._registry[route_name] = (dependencies, constants or {})
class ConstantRegistry:
    """Tracks constant names stored on the app's Config.

    The registry itself only holds lower-cased key names; the values live
    on the wrapped Config object as upper-cased attributes.
    """

    def __init__(self, config: Config):
        self._config = config
        self._registry: Set[str] = set()

    def __str__(self) -> str:
        return str(self._registry)

    def __iter__(self):
        return iter(self._registry)

    def __getitem__(self, key):
        # BUG FIX: the backing store is a set, so the original
        # ``self._registry[key]`` always raised TypeError (sets are not
        # subscriptable). Delegate to get(), which returns the config value.
        return self.get(key)

    def __contains__(self, other: Any):
        return other in self._registry

    def register(self, key: str, value: Any, overwrite: bool):
        """Store *value* on the config as KEY and remember the name.

        :raises ValueError: when KEY already exists and overwrite is falsy.
        """
        attribute = key.upper()
        if attribute in self._config and not overwrite:
            raise ValueError(
                f"A value for {attribute} has already been assigned"
            )
        key = key.lower()
        setattr(self._config, attribute, value)
        return self._registry.add(key)

    def get(self, key: str):
        """Return the config value for *key*; raise ValueError if unknown."""
        key = key.lower()
        if key not in self._registry:
            raise ValueError
        attribute = key.upper()
        return getattr(self._config, attribute)

    @property
    def length(self):
        return len(self._registry)
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extensions/injection/registry.py
| 0.93362 | 0.202108 |
registry.py
|
pypi
|
from __future__ import annotations
from asyncio import sleep
from dataclasses import dataclass
from datetime import datetime, timedelta
from multiprocessing import Manager
from queue import Empty, Full
from signal import SIGINT, SIGTERM
from signal import signal as signal_func
from typing import TYPE_CHECKING, Optional
from sanic.application.constants import ServerStage
from sanic.log import logger
if TYPE_CHECKING:
from sanic import Sanic
class Stale(ValueError):
    # Raised by HealthState.missed() once a process has missed
    # HealthMonitor.MAX_MISSES consecutive health checks.
    ...
@dataclass
class HealthState:
    """Per-process record of the latest health report and missed checks."""

    name: str
    last: Optional[datetime] = None
    misses: int = 0

    def report(self, timestamp: int) -> None:
        """Record a health report received at *timestamp* (epoch seconds)."""
        logger.debug(f"Reporting {self.name}")
        if self.misses:
            logger.info(f"Recovered {self.name}")
        self.last = datetime.fromtimestamp(timestamp)
        self.misses = 0

    def missed(self) -> None:
        """Count one missed check; raise Stale once the limit is hit."""
        self.misses += 1
        logger.info(
            f"Missed health check for {self.name} "
            f"({self.misses}/{HealthMonitor.MAX_MISSES})"
        )
        if self.misses >= HealthMonitor.MAX_MISSES:
            raise Stale

    def check(self) -> None:
        """Register a miss when the last report is older than allowed.

        The allowed gap grows with each consecutive miss.
        """
        if self.last is None:
            return
        allowed_gap = timedelta(
            seconds=(HealthMonitor.MISSED_THRESHHOLD * (self.misses + 1))
        )
        if self.last < (datetime.now() - allowed_gap):
            self.missed()

    def reset(self) -> None:
        """Clear the miss counter and treat "now" as the last report."""
        self.misses = 0
        self.last = datetime.now()
def send_healthy(name, queue):
    """Push a (name, timestamp) health report; drop it if the queue is full."""
    health = (name, datetime.now().timestamp())
    logger.debug(f"Sending health: {health}", extra={"verbosity": 1})
    try:
        queue.put_nowait(health)
    except Full:
        # A full queue means the monitor is behind; skip this report.
        pass
async def health_check(app: Sanic):
    """Worker-side loop: report liveness to the monitor while serving.

    Sends at most one report per REPORT_INTERVAL seconds, polling every
    0.1s so the task can notice server shutdown quickly.
    """
    sent = datetime.now()
    while app.state.stage is ServerStage.SERVING:
        now = datetime.now()
        if sent < now - timedelta(seconds=HealthMonitor.REPORT_INTERVAL):
            send_healthy(app.m.name, app.shared_ctx.health_queue)
            sent = now
        await sleep(0.1)
async def start_health_check(app: Sanic):
    # Launch the per-worker reporting loop as a named background Sanic task.
    app.add_task(health_check(app), name="health_check")
async def prepare_health_monitor(app, *_):
    # main_process_start hook: create the shared health queue.
    HealthMonitor.prepare(app)
async def setup_health_monitor(app, *_):
    """main_process_ready hook: start the monitor as a managed process
    watching all transient worker processes."""
    health = HealthMonitor(app)
    process_names = [
        process.name for process in app.manager.transient_processes
    ]
    app.manager.manage(
        "HealthMonitor",
        health,
        {
            "process_names": process_names,
            "health_queue": app.shared_ctx.health_queue,
        },
    )
class HealthMonitor:
    """Separate process that consumes worker health reports and publishes
    the name of any process that goes stale."""

    # Defaults; overwritten from app config in setup().
    MAX_MISSES = 3
    REPORT_INTERVAL = 5
    MISSED_THRESHHOLD = 10  # NOTE: historic spelling kept for compatibility

    def __init__(self, app: Sanic):
        self.run = True
        self.monitor_publisher = app.manager.monitor_publisher

    def __call__(self, process_names, health_queue) -> None:
        """Monitor-process entry point: drain reports, check staleness."""
        # Stop the loop cleanly on SIGINT/SIGTERM.
        signal_func(SIGINT, self.stop)
        signal_func(SIGTERM, self.stop)
        now = datetime.now()
        health_state = {
            process_name: HealthState(last=now, name=process_name)
            for process_name in process_names
        }
        while self.run:
            try:
                name, timestamp = health_queue.get(timeout=0.05)
            except Empty:
                ...
            else:
                health_state[name].report(timestamp)
            for state in health_state.values():
                try:
                    state.check()
                except Stale:
                    # Too many misses: reset the record and publish the
                    # process name so the manager can act on it.
                    state.reset()
                    self.monitor_publisher.send(state.name)

    def stop(self, *_):
        # Signal handler: break out of the monitoring loop.
        self.run = False

    @classmethod
    def prepare(cls, app: Sanic):
        """Create the cross-process health queue on the shared context."""
        sync_manager = Manager()
        health_queue = sync_manager.Queue(maxsize=app.state.workers * 2)
        app.shared_ctx.health_queue = health_queue

    @classmethod
    def setup(
        cls,
        app: Sanic,
        max_misses: Optional[int] = None,
        report_interval: Optional[int] = None,
        missed_threshhold: Optional[int] = None,
    ):
        """Wire the monitor into the app lifecycle, applying config values."""
        HealthMonitor.MAX_MISSES = max_misses or app.config.HEALTH_MAX_MISSES
        HealthMonitor.REPORT_INTERVAL = (
            report_interval or app.config.HEALTH_REPORT_INTERVAL
        )
        HealthMonitor.MISSED_THRESHHOLD = (
            missed_threshhold or app.config.HEALTH_MISSED_THRESHHOLD
        )
        app.main_process_start(prepare_health_monitor)
        app.main_process_ready(setup_health_monitor)
        app.after_server_start(start_health_check)
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extensions/health/monitor.py
| 0.746509 | 0.190347 |
monitor.py
|
pypi
|
from functools import wraps
from inspect import isawaitable, isclass
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Type,
TypeVar,
Union,
overload,
)
from sanic import Blueprint
from sanic.exceptions import InvalidUsage, SanicException
from sanic_ext.extensions.openapi import definitions
from sanic_ext.extensions.openapi.builders import (
OperationStore,
SpecificationBuilder,
)
from sanic_ext.extensions.openapi.definitions import Component
from sanic_ext.extensions.openapi.types import (
Array,
Binary,
Boolean,
Byte,
Date,
DateTime,
Double,
Email,
Float,
Integer,
Long,
Object,
Password,
Schema,
String,
Time,
)
from sanic_ext.extras.validation.setup import do_validation, generate_schema
from sanic_ext.utils.extraction import extract_request
# Public decorator and schema-type surface of the openapi module.
__all__ = (
    "definitions",
    "body",
    "component",
    "definition",
    "deprecated",
    "description",
    "document",
    "exclude",
    "no_autodoc",
    "operation",
    "parameter",
    "response",
    "secured",
    "summary",
    "tag",
    "Array",
    "Binary",
    "Boolean",
    "Byte",
    "Component",
    "Date",
    "DateTime",
    "Double",
    "Email",
    "Float",
    "Integer",
    "Long",
    "Object",
    "Password",
    "String",
    "Time",
)
def _content_or_component(content):
    """Return *content*, wrapped as a Component reference when it is a class
    whose schema is already registered with the SpecificationBuilder."""
    if isclass(content):
        registered = SpecificationBuilder()._components["schemas"].get(
            content.__name__
        )
        if registered:
            content = definitions.Component(content)
    return content
@overload
def exclude(flag: bool = True, *, bp: Blueprint) -> None:
    ...


@overload
def exclude(flag: bool = True) -> Callable:
    ...


def exclude(flag: bool = True, *, bp: Optional[Blueprint] = None):
    """Mark a handler -- or every route of a blueprint -- as excluded from
    the generated OpenAPI specification."""
    if bp:
        # Blueprint form: apply to every registered route handler.
        for route in bp.routes:
            exclude(flag)(route.handler)
        return

    def marker(handler):
        OperationStore()[handler].exclude(flag)
        return handler

    return marker
T = TypeVar("T")


def operation(name: str) -> Callable[[T], T]:
    """Set the operation name for the decorated handler."""

    def marker(handler):
        OperationStore()[handler].name(name)
        return handler

    return marker
def summary(text: str) -> Callable[[T], T]:
    """Set the operation summary line."""

    def marker(handler):
        OperationStore()[handler].describe(summary=text)
        return handler

    return marker
def description(text: str) -> Callable[[T], T]:
    """Set the operation description text."""

    def marker(handler):
        OperationStore()[handler].describe(description=text)
        return handler

    return marker
def document(
    url: Union[str, definitions.ExternalDocumentation],
    description: Optional[str] = None,
) -> Callable[[T], T]:
    """Attach external documentation (URL plus optional description)."""
    if isinstance(url, definitions.ExternalDocumentation):
        # Unpack a pre-built ExternalDocumentation object.
        description = url.fields["description"]
        url = url.fields["url"]

    def marker(handler):
        OperationStore()[handler].document(url, description)
        return handler

    return marker
def tag(*args: Union[str, definitions.Tag]) -> Callable[[T], T]:
    """Attach one or more tags to the operation."""

    def marker(handler):
        OperationStore()[handler].tag(*args)
        return handler

    return marker
def deprecated(maybe_func=None) -> Callable[[T], T]:
    """Mark the operation deprecated; works bare or as ``@deprecated()``."""

    def marker(handler):
        OperationStore()[handler].deprecate()
        return handler

    # Support both @deprecated and @deprecated() spellings.
    return marker(maybe_func) if maybe_func else marker
def no_autodoc(maybe_func=None) -> Callable[[T], T]:
    """Disable autodoc for the operation; works bare or called."""

    def marker(handler):
        OperationStore()[handler].disable_autodoc()
        return handler

    return marker(maybe_func) if maybe_func else marker
def body(
    content: Any,
    *,
    validate: bool = False,
    body_argument: str = "body",
    **kwargs,
) -> Callable[[T], T]:
    """Document (and optionally validate) the request body of a handler.

    :param content: a schema/model class, RequestBody, or raw content spec.
    :param validate: truthy to validate incoming data; a callable is used
        directly as the validation model.
    :param body_argument: kwarg name under which the validated body is
        injected into the handler.
    """
    body_content = _content_or_component(content)
    params = {**kwargs}
    validation_schema = None
    if isinstance(body_content, definitions.RequestBody):
        # Merge fields of a pre-built RequestBody; its "content" entry
        # becomes the documented body content.
        params = {**body_content.fields, **params}
        body_content = params.pop("content")
    if validate:
        # ``validate`` may itself be the model; otherwise use the content.
        if callable(validate):
            model = validate
        else:
            model = body_content
        validation_schema = generate_schema(body_content)

    def inner(func):
        @wraps(func)
        async def handler(*handler_args, **handler_kwargs):
            request = extract_request(*handler_args)
            if validate:
                try:
                    data = request.json
                    allow_multiple = False
                    allow_coerce = False
                except InvalidUsage:
                    # Not JSON: fall back to form data, which permits
                    # multiple values per key and type coercion.
                    data = request.form
                    allow_multiple = True
                    allow_coerce = True
                await do_validation(
                    model=model,
                    data=data,
                    schema=validation_schema,
                    request=request,
                    kwargs=handler_kwargs,
                    body_argument=body_argument,
                    allow_multiple=allow_multiple,
                    allow_coerce=allow_coerce,
                )
            retval = func(*handler_args, **handler_kwargs)
            if isawaitable(retval):
                retval = await retval
            return retval

        # Re-key any operation data already stored for the wrapped function
        # onto the new wrapper before documenting the body.
        if func in OperationStore():
            OperationStore()[handler] = OperationStore().pop(func)
        OperationStore()[handler].body(body_content, **params)
        return handler

    return inner
# typing overloads: parameter() accepts either a pre-built Parameter
# object or the (name, schema, location) parts -- never both.
@overload
def parameter(
    *,
    parameter: definitions.Parameter,
    **kwargs,
) -> Callable[[T], T]:
    ...


@overload
def parameter(
    name: None,
    schema: None,
    location: None,
    parameter: definitions.Parameter,
    **kwargs,
) -> Callable[[T], T]:
    ...


@overload
def parameter(
    name: str,
    schema: Optional[Union[Type, Schema]] = None,
    location: Optional[str] = None,
    parameter: None = None,
    **kwargs,
) -> Callable[[T], T]:
    ...


def parameter(
    name: Optional[str] = None,
    schema: Optional[Union[Type, Schema]] = None,
    location: Optional[str] = None,
    parameter: Optional[definitions.Parameter] = None,
    **kwargs,
) -> Callable[[T], T]:
    """Document one operation parameter.

    Defaults: schema ``str``, location ``"query"``.

    :raises SanicException: when a Parameter object is combined with
        name/schema/location arguments.
    """
    if parameter:
        if name or schema or location:
            raise SanicException(
                "When using a parameter object, you cannot pass "
                "other arguments."
            )
    if not schema:
        schema = str
    if not location:
        location = "query"

    def inner(func: Callable):
        if parameter:
            # Temporary solution convert in to location,
            # need to be changed later.
            fields = dict(parameter.fields)
            if "in" in fields:
                fields["location"] = fields.pop("in")
            OperationStore()[func].parameter(**fields)
        else:
            OperationStore()[func].parameter(name, schema, location, **kwargs)
        return func

    return inner
def response(
    status: Union[Literal["default"], int] = "default",
    content: Any = str,
    description: Optional[str] = None,
    *,
    response: Optional[definitions.Response] = None,
    **kwargs,
) -> Callable[[T], T]:
    """Document one response, either from (status, content, description)
    parts or from a pre-built Response object -- never both.

    :raises SanicException: when a Response object is combined with the
        positional arguments.
    """
    if response:
        if status != "default" or content != str or description is not None:
            raise SanicException(
                "When using a response object, you cannot pass "
                "other arguments."
            )
        # Unpack the pre-built Response object.
        status = response.fields["status"]
        content = response.fields["content"]
        description = response.fields["description"]

    def inner(func):
        OperationStore()[func].response(status, content, description, **kwargs)
        return func

    return inner
def secured(*args, **kwargs) -> Callable[[T], T]:
    """Attach security requirements to the decorated operation."""

    def marker(handler):
        OperationStore()[handler].secured(*args, **kwargs)
        return handler

    return marker
Model = TypeVar("Model")


def component(
    model: Optional[Model] = None,
    *,
    name: Optional[str] = None,
    field: str = "schemas",
) -> Callable[[T], T]:
    """Register *model* as a reusable OpenAPI component.

    Usable bare (``@component``) or with arguments (``@component(...)``).
    """
    if not model:
        # Called with keyword arguments only: return a decorator.
        def wrap(inner_model):
            return component(inner_model, name=name, field=field)

        return wrap

    params = {
        key: value
        for key, value in (("name", name), ("field", field))
        if value
    }
    definitions.Component(model, **params)
    return model
def definition(
    *,
    exclude: Optional[bool] = None,
    operation: Optional[str] = None,
    summary: Optional[str] = None,
    description: Optional[str] = None,
    document: Optional[Union[str, definitions.ExternalDocumentation]] = None,
    tag: Optional[
        Union[
            Union[str, definitions.Tag], Sequence[Union[str, definitions.Tag]]
        ]
    ] = None,
    deprecated: bool = False,
    body: Optional[Union[Dict[str, Any], definitions.RequestBody, Any]] = None,
    parameter: Optional[
        Union[
            Union[Dict[str, Any], definitions.Parameter, str],
            List[Union[Dict[str, Any], definitions.Parameter, str]],
        ]
    ] = None,
    response: Optional[
        Union[
            Union[Dict[str, Any], definitions.Response, Any],
            List[Union[Dict[str, Any], definitions.Response]],
        ]
    ] = None,
    secured: Optional[Dict[str, Any]] = None,
    validate: bool = False,
    body_argument: str = "body",
) -> Callable[[T], T]:
    """Single-decorator form of the openapi decorators.

    Each keyword mirrors the standalone decorator of the same name and is
    applied by delegating to it; scalar-or-sequence arguments (tag,
    parameter, response) accept either shape.
    """
    validation_schema = None
    body_content = None

    def inner(func):
        nonlocal validation_schema
        nonlocal body_content
        # The standalone decorators share names with this function's
        # parameters, so they must be fetched from the module globals.
        glbl = globals()
        if body:
            kwargs = {}
            content = body
            if isinstance(content, definitions.RequestBody):
                kwargs = content.fields
            elif isinstance(content, dict):
                if "content" in content:
                    kwargs = content
                else:
                    kwargs["content"] = content
            else:
                content = _content_or_component(content)
                kwargs["content"] = content
            if validate:
                kwargs["validate"] = validate
            kwargs["body_argument"] = body_argument
            func = glbl["body"](**kwargs)(func)
        if exclude is not None:
            func = glbl["exclude"](exclude)(func)
        if operation:
            func = glbl["operation"](operation)(func)
        if summary:
            func = glbl["summary"](summary)(func)
        if description:
            func = glbl["description"](description)(func)
        if document:
            kwargs = {}
            if isinstance(document, str):
                kwargs["url"] = document
            else:
                kwargs["url"] = document.fields["url"]
                kwargs["description"] = document.fields["description"]
            func = glbl["document"](**kwargs)(func)
        if tag:
            # Accept a single tag or any sequence of tags.
            taglist = []
            op = (
                "extend"
                if isinstance(tag, (list, tuple, set, frozenset))
                else "append"
            )
            getattr(taglist, op)(tag)
            func = glbl["tag"](*taglist)(func)
        if deprecated:
            func = glbl["deprecated"]()(func)
        if parameter:
            paramlist = []
            op = (
                "extend"
                if isinstance(parameter, (list, tuple, set, frozenset))
                else "append"
            )
            getattr(paramlist, op)(parameter)
            for param in paramlist:
                kwargs = {}
                if isinstance(param, definitions.Parameter):
                    kwargs = param.fields
                    if "in" in kwargs:
                        kwargs["location"] = kwargs.pop("in")
                elif isinstance(param, dict) and "name" in param:
                    kwargs = param
                elif isinstance(param, str):
                    kwargs["name"] = param
                else:
                    raise SanicException(
                        "parameter must be a Parameter instance, a string, or "
                        "a dictionary containing at least 'name'."
                    )
                if "schema" not in kwargs:
                    kwargs["schema"] = str
                func = glbl["parameter"](**kwargs)(func)
        if response:
            resplist = []
            op = (
                "extend"
                if isinstance(response, (list, tuple, set, frozenset))
                else "append"
            )
            getattr(resplist, op)(response)
            # Multiple bare models are ambiguous (no status keys).
            if len(resplist) > 1 and any(
                not isinstance(item, definitions.Response)
                and not isinstance(item, dict)
                for item in resplist
            ):
                raise SanicException(
                    "Cannot use multiple bare custom models to define "
                    "multiple responses like openapi.definition(response=["
                    "MyModel1, MyModel2]). Instead, you should wrap them in a "
                    "dict or a Response object. See "
                    "https://sanic.dev/en/plugins/sanic-ext/openapi/decorators"
                    ".html#response for more details."
                )
            for resp in resplist:
                kwargs = {}
                if isinstance(resp, definitions.Response):
                    kwargs = resp.fields
                elif isinstance(resp, dict):
                    if "content" in resp:
                        kwargs = resp
                    else:
                        kwargs["content"] = resp
                else:
                    kwargs["content"] = resp
                if "status" not in kwargs:
                    kwargs["status"] = "default"
                func = glbl["response"](**kwargs)(func)
        if secured:
            func = glbl["secured"](secured)(func)
        return func

    return inner
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extensions/openapi/openapi.py
| 0.781664 | 0.159774 |
openapi.py
|
pypi
|
import json
import uuid
from dataclasses import MISSING, is_dataclass
from datetime import date, datetime, time
from enum import Enum
from inspect import getmembers, isclass, isfunction, ismethod
from typing import (
Any,
Dict,
List,
Optional,
Union,
get_args,
get_origin,
get_type_hints,
)
from sanic_routing.patterns import alpha, ext, nonemptystr, parse_date, slug
from sanic_ext.utils.typing import (
UnionType,
is_attrs,
is_generic,
is_msgspec,
is_pydantic,
)
# Optional attrs support: NOTHING is attrs' "no default" sentinel, or a
# unique placeholder object when attrs is not installed.
try:
    import attrs

    NOTHING: Any = attrs.NOTHING
except ImportError:
    NOTHING = object()
# Optional msgspec support: use the real metadata helpers when msgspec is
# installed, inert stand-ins otherwise.
try:
    import msgspec
    from msgspec.inspect import Metadata as MsgspecMetadata
    from msgspec.inspect import type_info as msgspec_type_info

    MsgspecMetadata: Any = MsgspecMetadata
    NODEFAULT: Any = msgspec.NODEFAULT
    UNSET: Any = msgspec.UNSET

    class MsgspecAdapter(msgspec.Struct):
        # Mirrors the dataclass-field attributes used by Object.make.
        name: str
        default: Any
        metadata: dict

except ImportError:

    def msgspec_type_info(struct):
        # Stub; only reached when msgspec types cannot occur.
        pass

    class MsgspecAdapter:
        pass

    MsgspecMetadata = object()
    NODEFAULT = object()
    UNSET = object()
class Definition:
    """Base class for OAS objects: filters constructor kwargs down to the
    declared properties (plus ``x-`` extensions) and serializes the
    non-null fields."""

    __nullable__: Optional[List[str]] = []
    __ignore__: Optional[List[str]] = []

    def __init__(self, **kwargs):
        self._fields: Dict[str, Any] = self.guard(kwargs)

    @property
    def fields(self):
        return self._fields

    def guard(self, fields):
        # Keep only known properties and OpenAPI "x-" extension keys.
        return {
            k: v
            for k, v in fields.items()
            if k in _properties(self).keys() or k.startswith("x-")
        }

    def serialize(self):
        # Emit a field when it has a value, or when it is declared nullable
        # (an empty __nullable__ list means every field may be null).
        return {
            k: self._value(v)
            for k, v in _serialize(self.fields).items()
            if (
                k not in self.__ignore__
                and (
                    v is not None
                    or (
                        isinstance(self.__nullable__, list)
                        and (not self.__nullable__ or k in self.__nullable__)
                    )
                )
            )
        }

    def __str__(self):
        return json.dumps(self.serialize())

    @staticmethod
    def _value(value):
        # Enum members serialize as their underlying value.
        if isinstance(value, Enum):
            return value.value
        return value
class Schema(Definition):
    """OpenAPI schema object plus the ``make`` factory that maps Python
    types and values onto schema instances."""

    title: str
    description: str
    type: str
    format: str
    nullable: bool
    required: bool
    default: None
    example: None
    oneOf: List[Definition]
    anyOf: List[Definition]
    allOf: List[Definition]
    additionalProperties: Dict[str, str]
    multipleOf: int
    maximum: int
    exclusiveMaximum: bool
    minimum: int
    exclusiveMinimum: bool
    maxLength: int
    minLength: int
    pattern: str
    enum: Union[List[Any], Enum]

    @staticmethod
    def make(value, **kwargs):
        """Build a Schema for *value*: a type, an instance (used as the
        default), a container literal, an Enum class, or a typing generic."""
        _type = type(value)
        origin = get_origin(value)
        args = get_args(value)
        # Optional/Union: strip None (marking the schema nullable), recurse.
        if origin in (Union, UnionType):
            if type(None) in args:
                kwargs["nullable"] = True
            filtered = [arg for arg in args if arg is not type(None)]  # noqa
            if len(filtered) == 1:
                return Schema.make(filtered[0], **kwargs)
            return Schema(
                oneOf=[Schema.make(arg) for arg in filtered], **kwargs
            )
        # type/format are fixed by the concrete subclass constructors below.
        for field in ("type", "format"):
            kwargs.pop(field, None)
        if isinstance(value, Schema):
            return value
        # Bare type objects map to scalar schema classes.
        if value == bool:
            return Boolean(**kwargs)
        elif value == int:
            return Integer(**kwargs)
        elif value == float:
            return Float(**kwargs)
        elif value == str or value in (nonemptystr, ext, slug, alpha):
            return String(**kwargs)
        elif value == bytes:
            return Byte(**kwargs)
        elif value == bytearray:
            return Binary(**kwargs)
        elif value == date:
            return Date(**kwargs)
        elif value == time:
            return Time(**kwargs)
        elif value == datetime or value is parse_date:
            return DateTime(**kwargs)
        elif value == uuid.UUID:
            return UUID(**kwargs)
        elif value == Any:
            return AnyValue(**kwargs)
        # Instances map to scalar schemas with the instance as default.
        if _type == bool:
            return Boolean(default=value, **kwargs)
        elif _type == int:
            return Integer(default=value, **kwargs)
        elif _type == float:
            return Float(default=value, **kwargs)
        elif _type == str:
            return String(default=value, **kwargs)
        elif _type == bytes:
            return Byte(default=value, **kwargs)
        elif _type == bytearray:
            return Binary(default=value, **kwargs)
        elif _type == date:
            return Date(**kwargs)
        elif _type == time:
            return Time(**kwargs)
        elif _type == datetime:
            return DateTime(**kwargs)
        elif _type == uuid.UUID:
            return UUID(**kwargs)
        elif _type == list:
            # A list literal describes the allowed item schema(s).
            if len(value) == 0:
                schema = Schema(nullable=True)
            elif len(value) == 1:
                schema = Schema.make(value[0])
            else:
                schema = Schema(oneOf=[Schema.make(x) for x in value])
            return Array(schema, **kwargs)
        elif _type == dict:
            return Object.make(value, **kwargs)
        elif (
            (is_generic(value) or is_generic(_type))
            and origin == dict
            and len(args) == 2
        ):
            # Dict[K, V] -> object schema with additionalProperties of V.
            kwargs["additionalProperties"] = Schema.make(args[1])
            return Object(**kwargs)
        elif (is_generic(value) or is_generic(_type)) and origin == list:
            kwargs.pop("items", None)
            return Array(Schema.make(args[0]), **kwargs)
        elif _type is type(Enum):
            # Enum class: schema of the member-value type with an enum list
            # ("string" when members mix value types).
            available = [item.value for item in value.__members__.values()]
            available_types = list({type(item) for item in available})
            schema_type = (
                available_types[0] if len(available_types) == 1 else "string"
            )
            return Schema.make(
                schema_type,
                enum=[item.value for item in value.__members__.values()],
            )
        else:
            return Object.make(value, **kwargs)
class Boolean(Schema):
    """Schema for ``type: boolean``."""

    def __init__(self, **kwargs):
        super().__init__(type="boolean", **kwargs)


class Integer(Schema):
    """Schema for ``type: integer, format: int32``."""

    def __init__(self, **kwargs):
        super().__init__(type="integer", format="int32", **kwargs)


class Long(Schema):
    """Schema for ``type: integer, format: int64``."""

    def __init__(self, **kwargs):
        super().__init__(type="integer", format="int64", **kwargs)


class Float(Schema):
    """Schema for ``type: number, format: float``."""

    def __init__(self, **kwargs):
        super().__init__(type="number", format="float", **kwargs)


class Double(Schema):
    """Schema for ``type: number, format: double``."""

    def __init__(self, **kwargs):
        super().__init__(type="number", format="double", **kwargs)


class String(Schema):
    """Schema for ``type: string``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", **kwargs)


class Byte(Schema):
    """Schema for ``type: string, format: byte``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="byte", **kwargs)


class Binary(Schema):
    """Schema for ``type: string, format: binary``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="binary", **kwargs)


class Date(Schema):
    """Schema for ``type: string, format: date``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="date", **kwargs)


class Time(Schema):
    """Schema for ``type: string, format: time``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="time", **kwargs)


class DateTime(Schema):
    """Schema for ``type: string, format: date-time``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="date-time", **kwargs)


class Password(Schema):
    """Schema for ``type: string, format: password``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="password", **kwargs)


class Email(Schema):
    """Schema for ``type: string, format: email``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="email", **kwargs)


class UUID(Schema):
    """Schema for ``type: string, format: uuid``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="uuid", **kwargs)


class AnyValue(Schema):
    """Schema placeholder that accepts any value."""

    @classmethod
    def make(cls, value: Any, **kwargs):
        return cls(
            AnyValue={},
            **kwargs,
        )
class Object(Schema):
    """Schema for ``type: object``; ``make`` extracts per-field metadata
    from pydantic / attrs / dataclass / msgspec models."""

    properties: Dict[str, Schema]
    maxProperties: int
    minProperties: int

    def __init__(
        self, properties: Optional[Dict[str, Schema]] = None, **kwargs
    ):
        if properties:
            kwargs["properties"] = properties
        super().__init__(type="object", **kwargs)

    @classmethod
    def make(cls, value: Any, **kwargs):
        extra: Dict[str, Any] = {}
        # Extract from field metadata if msgspec, pydantic, attrs, or dataclass
        if isclass(value):
            fields = ()
            if is_pydantic(value):
                # Pydantic-dataclass wrappers expose the real model here.
                try:
                    value = value.__pydantic_model__
                except AttributeError:
                    ...
                extra = value.schema()["properties"]
            elif is_attrs(value):
                fields = value.__attrs_attrs__
            elif is_dataclass(value):
                fields = value.__dataclass_fields__.values()
            elif is_msgspec(value):
                # adapt to msgspec metadata layout -- annotated type --
                # to match dataclass "metadata" attribute
                fields = [
                    MsgspecAdapter(
                        name=f.name,
                        default=MISSING
                        if f.default in (UNSET, NODEFAULT)
                        else f.default,
                        metadata=getattr(f.type, "extra", {}),
                    )
                    for f in msgspec_type_info(value).fields
                ]
            if fields:
                # Title + default + any "openapi" metadata per field.
                extra = {
                    field.name: {
                        "title": field.name.title(),
                        **(
                            {"default": field.default}
                            if field.default not in (MISSING, NOTHING)
                            else {}
                        ),
                        **dict(field.metadata).get("openapi", {}),
                    }
                    for field in fields
                }
        return cls(
            {
                k: Schema.make(v, **extra.get(k, {}))
                for k, v in _properties(value).items()
            },
            **kwargs,
        )
class Array(Schema):
    """Schema for ``type: array``; item schema derived from *items*."""

    items: Any
    maxItems: int
    minItems: int
    uniqueItems: bool

    def __init__(self, items: Any, **kwargs):
        super().__init__(type="array", items=Schema.make(items), **kwargs)
def _serialize(value) -> Any:
    """Recursively convert Definitions, Enum classes, and containers into
    plain JSON-serializable structures."""
    if isinstance(value, Definition):
        return value.serialize()
    if isinstance(value, type) and issubclass(value, Enum):
        return [member.value for member in value.__members__.values()]
    if isinstance(value, dict):
        return {key: _serialize(item) for key, item in value.items()}
    if isinstance(value, list):
        return [_serialize(item) for item in value]
    return value
def _properties(value: object) -> Dict:
    """Collect the documentable properties of *value*: property return
    types, type annotations, and (for dicts) the items themselves."""
    try:
        fields = {
            x: val
            for x, v in getmembers(value, _is_property)
            if (val := _extract(v)) and x in value.__dict__
        }
    except AttributeError:
        fields = {}
    cls = value if callable(value) else value.__class__
    extra = value if isinstance(value, dict) else {}
    try:
        annotations = get_type_hints(cls)
    except NameError:
        # Unresolvable forward references: fall back to raw annotations.
        if hasattr(value, "__annotations__"):
            annotations = value.__annotations__
        else:
            annotations = {}
    annotations.pop("return", None)
    try:
        # Skip private names and classes nested inside *cls* itself.
        output = {
            k: v
            for k, v in {**fields, **annotations, **extra}.items()
            if not k.startswith("_")
            and not (
                isclass(v)
                and isclass(cls)
                and v.__qualname__.endswith(
                    f"{getattr(cls, '__name__', '<unknown>')}."
                    f"{getattr(v, '__name__', '<unknown>')}"
                )
            )
        }
    except TypeError:
        return {}
    return output
def _extract(item):
    """For a property, return its getter's annotated return type; any other
    object passes through unchanged."""
    if not isinstance(item, property):
        return item
    return get_type_hints(item.fget).get("return")
def _is_property(item):
    """getmembers predicate: keep members that are not functions/methods."""
    return not (isfunction(item) or ismethod(item))
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extensions/openapi/types.py
| 0.77373 | 0.200773 |
types.py
|
pypi
|
import inspect
import warnings
import yaml
class OpenAPIDocstringParser:
    """Base class for parsers that turn handler docstrings into OpenAPI
    path metadata."""

    def __init__(self, docstring: str):
        """
        Args:
            docstring (str): docstring of function to be parsed
        """
        # Treat a missing docstring as empty, and strip common indentation.
        self.docstring = inspect.cleandoc(docstring or "")

    def to_openAPI_2(self) -> dict:
        """
        Returns:
            json style dict: dict to be read for the path by swagger 2.0 UI
        """
        raise NotImplementedError()

    def to_openAPI_3(self) -> dict:
        """
        Returns:
            json style dict: dict to be read for the path by swagger 3.0.0 UI
        """
        raise NotImplementedError()


class YamlStyleParametersParser(OpenAPIDocstringParser):
    """Parser for docstrings whose trailing ``openapi:`` section is YAML."""

    def _parse_no_yaml(self, doc: str) -> dict:
        """Build summary/description metadata from plain docstring text."""
        # clean again in case further indentation can be removed,
        # usually this do nothing...
        doc = inspect.cleandoc(doc)
        if not doc:
            return {}
        lines = doc.split("\n")
        if len(lines) == 1:
            return {"summary": lines[0]}
        summary = lines.pop(0)
        # remove empty lines at the beginning of the description
        while lines and lines[0].strip() == "":
            lines.pop(0)
        if not lines:
            return {"summary": summary}
        # use html tag to preserve linebreaks
        return {"summary": summary, "description": "<br>".join(lines)}

    def _parse_yaml(self, doc: str) -> dict:
        """Parse the YAML section; warn and return {} on any parse error."""
        try:
            return yaml.safe_load(doc)
        except Exception as e:
            warnings.warn(
                "error parsing openAPI yaml, ignoring it. ({})".format(e)
            )
            return {}

    def _parse_all(self) -> dict:
        # No YAML marker: the whole docstring is summary/description.
        if "openapi:\n" not in self.docstring:
            return self._parse_no_yaml(self.docstring)
        predoc, yamldoc = self.docstring.split("openapi:\n", 1)
        conf = self._parse_no_yaml(predoc)
        conf.update(self._parse_yaml(yamldoc))
        return conf

    def to_openAPI_2(self) -> dict:
        return self._parse_all()

    def to_openAPI_3(self) -> dict:
        return self._parse_all()
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extensions/openapi/autodoc.py
| 0.610337 | 0.331931 |
autodoc.py
|
pypi
|
from __future__ import annotations
"""
Builders for the oas3 object types
These are completely internal, so can be refactored if desired without concern
for breaking user experience
"""
from collections import defaultdict
from typing import TYPE_CHECKING, Optional, Sequence, Union, cast
from sanic_ext.extensions.openapi.constants import (
SecuritySchemeAuthorization,
SecuritySchemeLocation,
SecuritySchemeType,
)
from ...utils.route import remove_nulls, remove_nulls_from_kwargs
from .autodoc import YamlStyleParametersParser
from .definitions import (
Any,
Components,
Contact,
Dict,
ExternalDocumentation,
Flows,
Info,
License,
List,
OpenAPI,
Operation,
Parameter,
PathItem,
RequestBody,
Response,
SecurityRequirement,
SecurityScheme,
Server,
Tag,
)
if TYPE_CHECKING:
from sanic import Sanic
class OperationBuilder:
    """Mutable builder that accumulates OpenAPI Operation metadata for a
    single route handler before it is frozen into an Operation."""
    summary: str
    description: str
    operationId: str
    requestBody: RequestBody
    externalDocs: ExternalDocumentation
    tags: List[str]
    security: List[Any]
    parameters: List[Parameter]
    responses: Dict[str, Response]
    callbacks: List[str]  # TODO
    deprecated: bool = False
    def __init__(self):
        self.tags = []
        self.security = []
        self.parameters = []
        self.responses = {}
        # Lowest-precedence values merged in at build time.
        self._default = {}
        # Values parsed from the handler docstring (see autodoc()).
        self._autodoc = None
        self._exclude = False
        self._allow_autodoc = True
        self._app: Optional[Sanic] = None
    def name(self, value: str):
        """Set the operationId."""
        self.operationId = value
    def describe(self, summary: str = None, description: str = None):
        """Set summary and/or description, ignoring falsy values."""
        if summary:
            self.summary = summary
        if description:
            self.description = description
    def document(self, url: str, description: str = None):
        """Attach an externalDocs reference."""
        self.externalDocs = ExternalDocumentation.make(url, description)
    def tag(self, *args: str):
        """Append tags; Tag instances are reduced to their name."""
        for arg in args:
            if isinstance(arg, Tag):
                arg = arg.fields["name"]
            self.tags.append(arg)
    def deprecate(self):
        """Mark the operation as deprecated."""
        self.deprecated = True
    def body(self, content: Any, **kwargs):
        """Set the requestBody definition."""
        self.requestBody = RequestBody.make(content, **kwargs)
    def parameter(
        self, name: str, schema: Any, location: str = "query", **kwargs
    ):
        """Append a parameter definition (query parameter by default)."""
        self.parameters.append(
            Parameter.make(name, schema, location, **kwargs)
        )
    def response(
        self, status, content: Any = None, description: str = None, **kwargs
    ):
        """Add a response for *status*; if one already exists for that
        status, merge the new content into it instead of replacing it."""
        response = Response.make(content, description, **kwargs)
        if status in self.responses:
            # NOTE(review): writes via `_fields` but reads via `fields`
            # — confirm both attributes exist on Response and alias the
            # same mapping.
            self.responses[status]._fields["content"].update(
                response.fields["content"]
            )
        else:
            self.responses[status] = response
    def secured(self, *args, **kwargs):
        """Record security requirements.

        Accepts either a single mapping, or scheme names (classes are
        reduced to their __name__) with scope lists as values.
        """
        if not kwargs and len(args) == 1 and isinstance(args[0], dict):
            items = args[0]
        else:
            # Positional names get empty scope lists.
            items = {**{v: [] for v in args}, **kwargs}
        gates = {}
        for name, params in items.items():
            gate = name.__name__ if isinstance(name, type) else name
            gates[gate] = params
        self.security.append(gates)
    def disable_autodoc(self):
        """Prevent docstring-derived values from being merged at build."""
        self._allow_autodoc = False
    def build(self):
        """Freeze the collected values into an Operation definition."""
        operation_dict = self._build_merged_dict()
        if "responses" not in operation_dict:
            # todo -- look into more consistent default response format
            operation_dict["responses"] = {"default": {"description": "OK"}}
        return Operation(**operation_dict)
    def _build_merged_dict(self):
        # Precedence (lowest to highest): defaults, autodoc values,
        # values set explicitly on the builder. Falsy values and
        # private ("_") keys are dropped from each layer.
        defined_dict = self.__dict__.copy()
        autodoc_dict = self._autodoc or {}
        default_dict = self._default
        merged_dict = {}
        for d in (default_dict, autodoc_dict, defined_dict):
            cleaned = {
                k: v for k, v in d.items() if v and not k.startswith("_")
            }
            merged_dict.update(cleaned)
        return merged_dict
    def autodoc(self, docstring: str):
        """Parse the handler docstring into OpenAPI 3 values."""
        y = YamlStyleParametersParser(docstring)
        self._autodoc = y.to_openAPI_3()
    def exclude(self, flag: bool = True):
        """Exclude (or re-include) this operation from the spec."""
        self._exclude = flag
class OperationStore(defaultdict):
    """Process-wide singleton mapping handlers to their OperationBuilder.

    Missing keys produce a fresh OperationBuilder (defaultdict factory).
    """
    _singleton = None
    def __new__(cls) -> Any:
        # BUG FIX: the singleton test must be an identity check against
        # None, not truthiness — this class *is* a dict, so an empty
        # store is falsy and `if not cls._singleton` would create a new
        # instance on every call until the store had at least one entry.
        if cls._singleton is None:
            cls._singleton = super().__new__(cls)
        return cls._singleton
    def __init__(self):
        super().__init__(OperationBuilder)
    @classmethod
    def reset(cls):
        """Discard the singleton so the next call builds a fresh store."""
        cls._singleton = None
class SpecificationBuilder:
    """Singleton builder that accumulates the whole OpenAPI document
    (info, paths, tags, security, components, servers) and freezes it
    into an OpenAPI definition via build()."""
    _urls: List[str]
    _title: str
    _version: str
    _description: Optional[str]
    _terms: Optional[str]
    _contact: Contact
    _license: License
    _paths: Dict[str, Dict[str, OperationBuilder]]
    _tags: Dict[str, Tag]
    _security: List[SecurityRequirement]
    _components: Dict[str, Any]
    _servers: List[Server]
    # _components: ComponentsBuilder
    # deliberately not included
    _singleton: Optional[SpecificationBuilder] = None
    def __new__(cls) -> SpecificationBuilder:
        # Lazily create and initialize the single shared instance.
        if not cls._singleton:
            cls._singleton = super().__new__(cls)
            cls._setup_instance(cls._singleton)
        return cast(SpecificationBuilder, cls._singleton)
    @classmethod
    def _setup_instance(cls, instance):
        """Initialize all accumulator attributes to their empty state."""
        instance._components = defaultdict(dict)
        instance._contact = None
        instance._description = None
        instance._external = None
        instance._license = None
        instance._paths = defaultdict(dict)
        instance._servers = []
        instance._tags = {}
        instance._security = []
        instance._terms = None
        instance._title = None
        instance._urls = []
        instance._version = None
    @classmethod
    def reset(cls):
        """Discard the singleton so the next call builds a fresh one."""
        cls._singleton = None
    @property
    def tags(self):
        return self._tags
    @property
    def security(self):
        return self._security
    def url(self, value: str):
        """Register a server URL to expose in the spec."""
        self._urls.append(value)
    def describe(
        self,
        title: str,
        version: str,
        description: Optional[str] = None,
        terms: Optional[str] = None,
    ):
        """Set the document's info block (unconditionally)."""
        self._title = title
        self._version = version
        self._description = description
        self._terms = terms
    def _do_describe(
        self,
        title: str,
        version: str,
        description: Optional[str] = None,
        terms: Optional[str] = None,
    ):
        # Only apply defaults when nothing was set explicitly.
        if any([self._title, self._version, self._description, self._terms]):
            return
        self.describe(title, version, description, terms)
    def tag(self, name: str, description: Optional[str] = None, **kwargs):
        """Register (or replace) a Tag by name."""
        self._tags[name] = Tag(name, description=description, **kwargs)
    def external(self, url: str, description: Optional[str] = None, **kwargs):
        """Set the document-level externalDocs reference."""
        self._external = ExternalDocumentation(url, description=description)
    def secured(
        self,
        name: str = None,
        value: Optional[Union[str, Sequence[str]]] = None,
    ):
        """Append a document-level security requirement.

        *value* is normalized to a list of scopes; a bare string becomes
        a one-element list.
        """
        if value is None:
            value = []
        elif isinstance(value, str):
            value = [value]
        else:
            value = list(value)
        self._security.append(SecurityRequirement(name=name, value=value))
    def contact(self, name: str = None, url: str = None, email: str = None):
        """Set the info contact (None kwargs are dropped)."""
        kwargs = remove_nulls_from_kwargs(name=name, url=url, email=email)
        self._contact = Contact(**kwargs)
    def _do_contact(
        self, name: str = None, url: str = None, email: str = None
    ):
        # Only apply defaults when no contact was set explicitly.
        if self._contact:
            return
        self.contact(name, url, email)
    def license(self, name: str = None, url: str = None):
        """Set the info license; a None name is a no-op."""
        if name is not None:
            self._license = License(name, url=url)
    def _do_license(self, name: str = None, url: str = None):
        # Only apply defaults when no license was set explicitly.
        if self._license:
            return
        self.license(name, url)
    def operation(self, path: str, method: str, operation: OperationBuilder):
        """Attach an operation builder to *path*/*method*, auto-creating
        Tag entries for any tags the operation declares."""
        for _tag in operation.tags:
            if _tag in self._tags.keys():
                continue
            self._tags[_tag] = Tag(_tag)
        self._paths[path][method.lower()] = operation
    def add_component(self, location: str, name: str, obj: Any):
        """Register *obj* under components/<location>/<name>."""
        self._components[location].update({name: obj})
    def has_component(self, location: str, name: str) -> bool:
        """Return True when a component is already registered."""
        return name in self._components.get(location, {})
    def add_security_scheme(
        self,
        ident: str,
        type: Union[str, SecuritySchemeType],
        *,
        bearer_format: Optional[str] = None,
        description: Optional[str] = None,
        flows: Optional[Union[Flows, Dict[str, Any]]] = None,
        location: Union[
            str, SecuritySchemeLocation
        ] = SecuritySchemeLocation.HEADER,
        name: str = "authorization",
        openid_connect_url: Optional[str] = None,
        scheme: Union[
            str, SecuritySchemeAuthorization
        ] = SecuritySchemeAuthorization.BEARER,
    ):
        """Register a SecurityScheme component under *ident*.

        Only the kwargs relevant to the scheme *type* (apiKey, http,
        oauth2, openIdConnect) are forwarded.
        """
        if isinstance(type, str):
            type = SecuritySchemeType(type)
        if isinstance(location, str):
            location = SecuritySchemeLocation(location)
        kwargs: Dict[str, Any] = {"type": type, "description": description}
        if type is SecuritySchemeType.API_KEY:
            kwargs["location"] = location
            kwargs["name"] = name
        elif type is SecuritySchemeType.HTTP:
            kwargs["scheme"] = scheme
            kwargs["bearerFormat"] = bearer_format
        elif type is SecuritySchemeType.OAUTH2:
            kwargs["flows"] = flows
        elif type is SecuritySchemeType.OPEN_ID_CONNECT:
            kwargs["openIdConnectUrl"] = openid_connect_url
        self.add_component(
            "securitySchemes",
            ident,
            SecurityScheme(**kwargs),
        )  # type: ignore
    def raw(self, data):
        """Merge a raw (dict-shaped) OpenAPI document into the builder."""
        if "info" in data:
            self.describe(
                data["info"].get("title"),
                data["info"].get("version"),
                data["info"].get("description"),
                data["info"].get("terms"),
            )
        if "servers" in data:
            for server in data["servers"]:
                self._servers.append(Server(**server))
        if "paths" in data:
            self._paths.update(data["paths"])
        if "components" in data:
            for location, component in data["components"].items():
                self._components[location].update(component)
        if "security" in data:
            for security in data["security"]:
                if not security:
                    self.secured()
                else:
                    for key, value in security.items():
                        self.secured(key, value)
        if "tags" in data:
            for tag in data["tags"]:
                self.tag(**tag)
        if "externalDocs" in data:
            self.external(**data["externalDocs"])
    def build(self, app: Sanic) -> OpenAPI:
        """Freeze everything collected so far into an OpenAPI document."""
        info = self._build_info()
        paths = self._build_paths(app)
        tags = self._build_tags()
        security = self._build_security()
        url_servers = getattr(self, "_urls", None)
        servers = self._servers
        # Avoid duplicating servers already registered explicitly
        # (compared with trailing slashes stripped).
        existing = [
            server.fields["url"].strip("/") for server in self._servers
        ]
        if url_servers is not None:
            for url_server in url_servers:
                if url_server.strip("/") not in existing:
                    servers.append(Server(url=url_server))
        components = (
            Components(**self._components) if self._components else None
        )
        return OpenAPI(
            info,
            paths,
            tags=tags,
            servers=servers,
            security=security,
            components=components,
            externalDocs=self._external,
        )
    def _build_info(self) -> Info:
        kwargs = remove_nulls(
            {
                "description": self._description,
                "termsOfService": self._terms,
                "license": self._license,
                "contact": self._contact,
            },
            deep=False,
        )
        return Info(self._title, self._version, **kwargs)
    def _build_tags(self):
        return [self._tags[k] for k in self._tags]
    def _build_paths(self, app: Sanic) -> Dict:
        # Raw dict paths pass through; builders are only included when
        # they belong to *app*, and are frozen via build().
        paths = {}
        for path, operations in self._paths.items():
            paths[path] = PathItem(
                **{
                    k: v if isinstance(v, dict) else v.build()
                    for k, v in operations.items()
                    if isinstance(v, dict) or v._app is app
                }
            )
        return paths
    def _build_security(self):
        # A requirement without a name renders as the empty object {}
        # (OpenAPI's "no security" entry).
        return [
            {sec.fields["name"]: sec.fields["value"]}
            if sec.fields["name"] is not None
            else {}
            for sec in self.security
        ]
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extensions/openapi/builders.py
| 0.618435 | 0.152758 |
builders.py
|
pypi
|
from __future__ import annotations
from dataclasses import _HAS_DEFAULT_FACTORY # type: ignore
from typing import (
Any,
Literal,
Mapping,
NamedTuple,
Optional,
Tuple,
Union,
get_args,
get_origin,
)
from sanic_ext.utils.typing import (
UnionType,
is_generic,
is_msgspec,
is_optional,
)
# Sentinels that mean "no value supplied": dataclasses' default-factory
# marker, extended with attrs' NOTHING when attrs is installed.
MISSING: Tuple[Any, ...] = (_HAS_DEFAULT_FACTORY,)
try:
    import attrs  # noqa
    NOTHING = attrs.NOTHING
    ATTRS = True
    MISSING = (
        _HAS_DEFAULT_FACTORY,
        NOTHING,
    )
except ImportError:
    ATTRS = False
# Optional msgspec support; MSGSPEC gates every msgspec code path below.
try:
    import msgspec
    MSGSPEC = True
except ImportError:
    MSGSPEC = False
class Hint(NamedTuple):
    """Pre-parsed description of a single type annotation.

    Instances are produced by the schema module's ``parse_hint``;
    ``validate`` checks a runtime value against the annotation,
    recursing through ``allowed`` for generic types, and optionally
    coerces the value.
    """
    hint: Any  # the original annotation object
    model: bool  # True when the hint is a model (dataclass/attrs)
    literal: bool  # True when the hint is a literal value, not a class
    typed: bool  # True when the hint is a generic (parse found args)
    nullable: bool  # True when the hint is an Optional-style union
    origin: Optional[Any]  # get_origin() of a generic hint, else None
    allowed: Tuple[Hint, ...]  # type: ignore
    allow_missing: bool  # True when the field has a default value/factory
    def validate(
        self, value, schema, allow_multiple=False, allow_coerce=False
    ):
        """Validate *value* against this hint; returns the (possibly
        coerced) value or raises ValueError."""
        if not self.typed:
            # Non-generic: either a nested model or a plain type/literal.
            if self.model:
                return check_data(
                    self.hint,
                    value,
                    schema,
                    allow_multiple=allow_multiple,
                    allow_coerce=allow_coerce,
                )
            # Unwrap one-element lists (e.g. repeated query args) when
            # the target type is not itself a list.
            if (
                allow_multiple
                and isinstance(value, list)
                and self.coerce_type is not list
                and len(value) == 1
            ):
                value = value[0]
            try:
                _check_types(value, self.literal, self.hint)
            except ValueError as e:
                # Last resort: coerce and re-check.
                if allow_coerce:
                    value = self.coerce(value)
                    _check_types(value, self.literal, self.hint)
                else:
                    raise e
        else:
            # Generic: handle None-ability first, then dispatch on the
            # origin (Union/Literal, list, dict).
            value = _check_nullability(
                value,
                self.nullable,
                self.allowed,
                schema,
                allow_multiple,
                allow_coerce,
            )
            if not self.nullable:
                if self.origin in (Union, Literal, UnionType):
                    value = _check_inclusion(
                        value,
                        self.allowed,
                        schema,
                        allow_multiple,
                        allow_coerce,
                    )
                elif self.origin is list:
                    value = _check_list(
                        value,
                        self.allowed,
                        self.hint,
                        schema,
                        allow_multiple,
                        allow_coerce,
                    )
                elif self.origin is dict:
                    value = _check_dict(
                        value,
                        self.allowed,
                        self.hint,
                        schema,
                        allow_multiple,
                        allow_coerce,
                    )
            if allow_coerce:
                value = self.coerce(value)
        return value
    def coerce(self, value):
        """Best-effort conversion of *value* to this hint's type(s);
        returns the original value when every conversion fails."""
        if is_generic(self.coerce_type):
            args = get_args(self.coerce_type)
            # Literals carry values, not callables — nothing to coerce.
            if get_origin(self.coerce_type) == Literal or (
                all(get_origin(arg) == Literal for arg in args)
            ):
                return value
            if type(None) in args and value is None:
                return None
            coerce_types = [arg for arg in args if not isinstance(None, arg)]
        else:
            coerce_types = [self.coerce_type]
        for coerce_type in coerce_types:
            try:
                if isinstance(value, list):
                    value = [coerce_type(item) for item in value]
                else:
                    value = coerce_type(value)
            except (ValueError, TypeError):
                ...
            else:
                return value
        return value
    @property
    def coerce_type(self):
        # For Optional[X], coerce to X. NOTE(review): assumes the
        # non-None member is the first arg — TODO confirm for
        # Union[None, X] orderings.
        coerce_type = self.hint
        if is_optional(coerce_type):
            coerce_type = get_args(self.hint)[0]
        return coerce_type
def check_data(model, data, schema, allow_multiple=False, allow_coerce=False):
    """Validate *data* (a dict) against *model* using the pre-built
    *schema* and hydrate it into a model instance.

    Raises TypeError when *data* is not a dict or any field fails
    validation (ValueError from hints is converted to TypeError).
    """
    if not isinstance(data, dict):
        raise TypeError(f"Value '{data}' is not a dict")
    sig = schema[model.__name__]["sig"]
    hints = schema[model.__name__]["hints"]
    # Bind against the model's signature so defaults are applied and
    # unexpected keys raise immediately.
    bound = sig.bind(**data)
    bound.apply_defaults()
    params = dict(zip(sig.parameters, bound.args))
    params.update(bound.kwargs)
    hydration_values = {}
    try:
        for key, value in params.items():
            hint = hints.get(key, Any)
            try:
                hydration_values[key] = hint.validate(
                    value,
                    schema,
                    allow_multiple=allow_multiple,
                    allow_coerce=allow_coerce,
                )
            except ValueError:
                # A MISSING sentinel on a field with a default is fine;
                # anything else propagates.
                if not hint.allow_missing or value not in MISSING:
                    raise
    except ValueError as e:
        raise TypeError(e)
    if MSGSPEC and is_msgspec(model):
        try:
            return msgspec.from_builtins(
                hydration_values, model, str_values=True, str_keys=True
            )
        except msgspec.ValidationError as e:
            raise TypeError(e)
    else:
        return model(**hydration_values)
def _check_types(value, literal, expected):
    """Raise ValueError unless *value* matches *expected*.

    When *literal* is true, *expected* is a concrete value to compare
    against (Any matches everything); otherwise it is a type for an
    isinstance check, with a constructor-based probe for msgspec models.
    """
    if literal:
        if expected is Any:
            return
        elif value != expected:
            raise ValueError(f"Value '{value}' must be {expected}")
    else:
        if MSGSPEC and is_msgspec(expected) and isinstance(value, Mapping):
            # msgspec structs don't support isinstance on mappings;
            # probe by attempting construction instead.
            try:
                expected(**value)
            except (TypeError, msgspec.ValidationError):
                raise ValueError(f"Value '{value}' is not of type {expected}")
        elif not isinstance(value, expected):
            raise ValueError(f"Value '{value}' is not of type {expected}")
def _check_nullability(
    value, nullable, allowed, schema, allow_multiple, allow_coerce
):
    """Handle Optional-style hints.

    Rejects None for non-nullable hints; for nullable hints with a
    non-None value, validates against each allowed alternative until one
    succeeds (for/else raises when none do).
    """
    if not nullable and value is None:
        raise ValueError("Value cannot be None")
    if nullable and value is not None:
        exc = None
        for hint in allowed:
            try:
                value = hint.validate(
                    value, schema, allow_multiple, allow_coerce
                )
            except ValueError as e:
                exc = e
            else:
                break
        else:
            # No alternative validated the value.
            if exc:
                if len(allowed) == 1:
                    raise exc
                else:
                    options = ", ".join(
                        [str(option.hint) for option in allowed]
                    )
                    raise ValueError(
                        f"Value '{value}' must be one of {options}, or None"
                    )
    return value
def _check_inclusion(value, allowed, schema, allow_multiple, allow_coerce):
for option in allowed:
try:
return option.validate(value, schema, allow_multiple, allow_coerce)
except (ValueError, TypeError):
...
options = ", ".join([str(option.hint) for option in allowed])
raise ValueError(f"Value '{value}' must be one of {options}")
def _check_list(value, allowed, hint, schema, allow_multiple, allow_coerce):
    """Validate every element of a list against the allowed hints.

    Raises ValueError when *value* is not a list or any element fails.
    """
    if isinstance(value, list):
        try:
            checked = [
                _check_inclusion(
                    element, allowed, schema, allow_multiple, allow_coerce
                )
                for element in value
            ]
        except (ValueError, TypeError):
            pass
        else:
            return checked
    raise ValueError(f"Value '{value}' must be a {hint}")
def _check_dict(value, allowed, hint, schema, allow_multiple, allow_coerce):
    """Validate every value of a dict against the allowed hints; keys
    pass through untouched.

    Raises ValueError when *value* is not a dict or any entry fails.
    """
    if isinstance(value, dict):
        try:
            checked = {
                key: _check_inclusion(
                    entry, allowed, schema, allow_multiple, allow_coerce
                )
                for key, entry in value.items()
            }
        except (ValueError, TypeError):
            pass
        else:
            return checked
    raise ValueError(f"Value '{value}' must be a {hint}")
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extras/validation/check.py
| 0.669745 | 0.206724 |
check.py
|
pypi
|
import types
from dataclasses import MISSING, Field, is_dataclass
from inspect import isclass, signature
from typing import (
Any,
Dict,
Literal,
Optional,
Tuple,
Union,
get_args,
get_origin,
get_type_hints,
)
from sanic_ext.utils.typing import is_attrs, is_generic, is_msgspec
from .check import Hint
# Optional-dependency shims: fall back to inert placeholder types so the
# isinstance checks below are always valid.
try:
    UnionType = types.UnionType  # type: ignore
except AttributeError:
    # Python < 3.10 has no X | Y union type.
    UnionType = type("UnionType", (), {})
try:
    from attr import NOTHING, Attribute
except ModuleNotFoundError:
    NOTHING = object()  # type: ignore
    Attribute = type("Attribute", (), {})  # type: ignore
try:
    from msgspec.inspect import type_info as msgspec_type_info
except ModuleNotFoundError:
    # Stub that reports no type info when msgspec is absent.
    def msgspec_type_info(val):
        pass
def make_schema(agg, item):
    """Recursively populate *agg* with validation schemas.

    For each model type reachable from *item* (dataclass, attrs or
    msgspec), stores its signature and parsed hints under the model's
    __name__, then recurses into the hints. Returns *agg*.
    """
    # Plain scalars carry no schema.
    if type(item) in (bool, str, int, float):
        return agg
    if is_generic(item) and (args := get_args(item)):
        # Generic: recurse into the type arguments.
        for arg in args:
            make_schema(agg, arg)
    elif item.__name__ not in agg and (
        is_dataclass(item) or is_attrs(item) or is_msgspec(item)
    ):
        if is_dataclass(item):
            fields = item.__dataclass_fields__
        elif is_msgspec(item):
            fields = {f.name: f.type for f in msgspec_type_info(item).fields}
        else:
            fields = {attr.name: attr for attr in item.__attrs_attrs__}
        sig = signature(item)
        hints = parse_hints(get_type_hints(item), fields)
        agg[item.__name__] = {
            "sig": sig,
            "hints": hints,
        }
        # Recurse into nested model types referenced by the hints.
        for hint in hints.values():
            make_schema(agg, hint.hint)
    return agg
def parse_hints(
    hints, fields: Dict[str, Union[Field, Attribute]]
) -> Dict[str, Hint]:
    """Convert a mapping of raw type hints into Hint records, pairing
    each hint with its dataclass/attrs field when one exists."""
    parsed: Dict[str, Hint] = {}
    for name, hint in hints.items():
        parsed[name] = parse_hint(hint, fields.get(name))
    return parsed
def parse_hint(hint, field: Optional[Union[Field, Attribute]] = None):
    """Parse one annotation (plus its optional field metadata) into a
    Hint record; recurses into generic type arguments."""
    origin = None
    literal = not isclass(hint)
    nullable = False
    typed = False
    model = False
    allowed: Tuple[Any, ...] = tuple()
    allow_missing = False
    # A field with a default value/factory may be omitted from input.
    if field and (
        (
            isinstance(field, Field)
            and field.default_factory is not MISSING  # type: ignore
        )
        or (isinstance(field, Attribute) and field.default is not NOTHING)
    ):
        allow_missing = True
    if is_dataclass(hint) or is_attrs(hint):
        model = True
    elif is_generic(hint):
        typed = True
        literal = False
        origin = get_origin(hint)
        args = get_args(hint)
        # Optional-style union: a Union that includes NoneType.
        nullable = origin in (Union, UnionType) and type(None) in args
        if nullable:
            # Keep the non-None alternatives.
            allowed = tuple(
                [
                    arg
                    for arg in args
                    if is_generic(arg) or not isinstance(None, arg)
                ]
            )
        elif origin is dict:
            # Only the value type is validated; keys pass through.
            allowed = (args[1],)
        elif (
            origin is list
            or origin is Literal
            or origin is Union
            or origin is UnionType
        ):
            allowed = args
    return Hint(
        hint,
        model,
        literal,
        typed,
        nullable,
        origin,
        tuple([parse_hint(item, None) for item in allowed]),
        allow_missing,
    )
|
/sanic-ext-23.6.0.tar.gz/sanic-ext-23.6.0/sanic_ext/extras/validation/schema.py
| 0.657648 | 0.185246 |
schema.py
|
pypi
|
import abc
import asyncio
import logging
import sys
import time
from typing import Callable, Dict, Iterator, Mapping, Optional
from sanic import Sanic, response
log = logging.getLogger(__name__)
# Default response bodies for passing / failing checker responses.
MSG_OK = 'OK'
MSG_FAIL = 'FAILED'
class BaseChecker(metaclass=abc.ABCMeta):
    """The base class for all checkers.
    This class implements various common functionality for all checkers
    and requires that each checker define its own ``run`` method. Each
    checker implementation should also set its own ``default_uri``.
    Args:
        app: The Sanic application instance to register the checker to. If not specified on
            initialization, the user must pass it to the ``init`` method to register the checker
            route with the application. If specified on initialization, ``init`` will be called
            automatically.
        uri: The route URI to expose for the checker.
        checks: A collection of checks to register with the checker on init. A check is a
            function which takes no arguments and returns (``bool``, ``str``), where the
            boolean signifies whether the check passed or not, and the string is a message
            associated with the success/failure.
        success_handler: A handler function which takes the check results (a list[dict])
            and returns a message string. This is called when all checks pass.
        success_headers: Headers to include in the checker response on success. By default, no
            additional headers are sent. This can be useful if, for example, a success
            handler is specified which returns a JSON message. The Content-Type: application/json
            header could be included here.
        success_status: The HTTP status code to use when the checker passes its checks.
        failure_handler: A handler function which takes the check results (a list[dict])
            and returns a message string. This is called when any check fails.
        failure_headers: Headers to include in the checker response on failure. By default, no
            additional headers are sent. This can be useful if, for example, a failure
            handler is specified which returns a JSON message. The Content-Type: application/json
            header could be included here.
        failure_status: The HTTP status code to use when the checker fails its checks.
        exception_handler: A function which would get called when a registered check
            raises an exception. This handler must take two arguments: the check function
            which raised the exception, and the tuple returned by ``sys.exc_info``. It must
            return a tuple of (bool, string), where the boolean is whether or not it passed
            and the string is the message to use for the check response. By default, no
            exception handler is registered, so an exception will lead to a check failure.
        options: Any additional options to pass to the ``Sanic.add_route`` method
            on ``init``.
    """
    default_uri = None
    def __init__(
        self,
        app: Optional[Sanic] = None,
        uri: Optional[str] = None,
        checks: Optional[Iterator[Callable]] = None,
        success_handler: Optional[Callable] = None,
        success_headers: Optional[Mapping] = None,
        success_status: Optional[int] = 200,
        failure_handler: Optional[Callable] = None,
        failure_headers: Optional[Mapping] = None,
        failure_status: Optional[int] = 500,
        exception_handler: Optional[Callable] = None,
        **options,
    ) -> None:
        self.app = app
        self.uri = uri
        self.success_handler = success_handler
        self.success_headers = success_headers
        self.success_status = success_status
        self.failure_handler = failure_handler
        self.failure_headers = failure_headers
        self.failure_status = failure_status
        self.exception_handler = exception_handler
        # FIX: materialize into a list. The annotation allows any
        # iterable (e.g. a tuple or generator), which would break
        # add_check()'s .append and repeated iteration in run().
        self.checks = list(checks) if checks else []
        self.options = options
        # Register the route immediately when an app was provided.
        if self.app:
            self.init(self.app, self.uri)
    def init(self, app: Sanic, uri: Optional[str] = None) -> None:
        """Initialize the checker with the Sanic application.
        This method will register a new endpoint for the specified
        Sanic application which exposes the results of the checker.
        Args:
            app: The Sanic application to register a new endpoint with.
            uri: The URI of the endpoint to register. If not specified, the
                checker's ``default_uri`` is used.
        """
        if not uri:
            uri = self.default_uri
        app.add_route(self.run, uri, **self.options)
    def add_check(self, fn: Callable) -> None:
        """Add a check to the checker.
        A check function is a function which takes no arguments and returns
        (``bool``, ``str``), where the boolean signifies whether the check
        passed or not, and the string is a message associated with the
        success/failure.
        Args:
            fn: The check to add.
        """
        self.checks.append(fn)
    @abc.abstractmethod
    async def run(self, request) -> response.HTTPResponse:
        """Run the checker.
        Each subclass of the BaseChecker must define its own ``run`` logic.
        """
        raise NotImplementedError
    async def exec_check(self, check: Callable) -> Dict:
        """Execute a single check and generate a dictionary result from the
        result of the check.
        Args:
            check: The check function to execute.
        Returns:
            A dictionary containing the results of the check.
        """
        try:
            # Checks may be either coroutine functions or plain callables.
            if asyncio.iscoroutinefunction(check):
                passed, msg = await check()
            else:
                passed, msg = check()
        except Exception:
            log.exception(
                f'Exception while running {self.__class__.__name__} check')
            info = sys.exc_info()
            if self.exception_handler:
                passed, msg = self.exception_handler(check, info)
            else:
                # No handler: an exception counts as a failed check.
                passed = False
                msg = f'Exception raised: {info[0].__name__}: {info[1]}'
        if not passed:
            log.error(
                f'{self.__class__.__name__} check "{check.__name__}" failed: {msg}')
        return {
            'check': check.__name__,
            'message': msg,
            'passed': passed,
            'timestamp': time.time(),
        }
|
/sanic_healthcheck-0.1.1.tar.gz/sanic_healthcheck-0.1.1/sanic_healthcheck/checker.py
| 0.753013 | 0.252407 |
checker.py
|
pypi
|
import logging
import time
from typing import Callable, Mapping, Optional
from sanic import Sanic, response
from .checker import MSG_FAIL, MSG_OK, BaseChecker
log = logging.getLogger(__name__)
class HealthCheck(BaseChecker):
    """A checker allowing a Sanic application to describe the health of the
    application at runtime.
    The results of registered check functions are cached by this checker by
    default. To disable result caching, initialize the checker with ``no_cache=True``.
    Since the health endpoint may be polled frequently (and potentially by multiple
    systems), the cache allows the check function results to be valid for a window of
    time, reducing the execution cost. This may be particularly helpful if a given
    health check is more expensive.
    Args:
        app: The Sanic application instance to register the checker to. If not specified on
            initialization, the user must pass it to the ``init`` method to register the checker
            route with the application. If specified on initialization, ``init`` will be called
            automatically.
        uri: The route URI to expose for the checker.
        checks: A collection of checks to register with the checker on init. A check is a
            function which takes no arguments and returns (``bool``, ``str``), where the
            boolean signifies whether the check passed or not, and the string is a message
            associated with the success/failure.
        no_cache: Disable the checker from caching check results. If this is set to ``True``, the
            ``success_ttl`` and ``failure_ttl`` do nothing.
        success_handler: A handler function which takes the check results (a list[dict])
            and returns a message string. This is called when all checks pass.
        success_headers: Headers to include in the checker response on success. By default, no
            additional headers are sent. This can be useful if, for example, a success
            handler is specified which returns a JSON message. The Content-Type: application/json
            header could be included here.
        success_status: The HTTP status code to use when the checker passes its checks.
        success_ttl: The TTL for a successful check result to live in the cache before it is updated.
        failure_handler: A handler function which takes the check results (a list[dict])
            and returns a message string. This is called when any check fails.
        failure_headers: Headers to include in the checker response on failure. By default, no
            additional headers are sent. This can be useful if, for example, a failure
            handler is specified which returns a JSON message. The Content-Type: application/json
            header could be included here.
        failure_status: The HTTP status code to use when the checker fails its checks.
        failure_ttl: The TTL for a failed check result to live in the cache before it is updated.
        exception_handler: A function which would get called when a registered check
            raises an exception. This handler must take two arguments: the check function
            which raised the exception, and the tuple returned by ``sys.exc_info``. It must
            return a tuple of (bool, string), where the boolean is whether or not it passed
            and the string is the message to use for the check response. By default, no
            exception handler is registered, so an exception will lead to a check failure.
        options: Any additional options to pass to the ``Sanic.add_route`` method
            on ``init``.
    """
    default_uri = '/health'
    def __init__(
        self,
        app: Optional[Sanic] = None,
        uri: Optional[str] = None,
        checks=None,
        no_cache: bool = False,
        success_handler: Optional[Callable] = None,
        success_headers: Optional[Mapping] = None,
        success_status: Optional[int] = 200,
        success_ttl: Optional[int] = 25,
        failure_handler: Optional[Callable] = None,
        failure_headers: Optional[Mapping] = None,
        failure_status: Optional[int] = 500,
        failure_ttl: Optional[int] = 5,
        exception_handler: Optional[Callable] = None,
        **options,
    ) -> None:
        # Maps check function -> its last result dict (with an added
        # 'expires' timestamp). Set before super().__init__ because the
        # base class may register the route (enabling run()) right away.
        self.cache = {}
        self.no_cache = no_cache
        self.success_ttl = success_ttl
        self.failure_ttl = failure_ttl
        super(HealthCheck, self).__init__(
            app=app,
            uri=uri,
            checks=checks,
            success_handler=success_handler,
            success_headers=success_headers,
            success_status=success_status,
            failure_handler=failure_handler,
            failure_headers=failure_headers,
            failure_status=failure_status,
            exception_handler=exception_handler,
            **options,
        )
    async def run(self, request) -> response.HTTPResponse:
        """Run all checks and generate an HTTP response for the results."""
        results = []
        for check in self.checks:
            # See if the check already has a cached health state. If so, use it;
            # otherwise, re-run the check.
            if not self.no_cache and check in self.cache and self.cache[check].get('expires') >= time.time():
                results.append(self.cache[check])
            else:
                result = await self.exec_check(check)
                if not self.no_cache:
                    # Successful results are cached longer than failures
                    # (success_ttl vs failure_ttl).
                    if result.get('passed'):
                        ttl = self.success_ttl
                    else:
                        ttl = self.failure_ttl
                    result['expires'] = result['timestamp'] + ttl
                    self.cache[check] = result
                results.append(result)
        passed = all((r['passed'] for r in results))
        if passed:
            msg = MSG_OK
            if self.success_handler:
                msg = self.success_handler(results)
            return response.text(
                body=msg,
                status=self.success_status,
                headers=self.success_headers,
            )
        else:
            msg = MSG_FAIL
            if self.failure_handler:
                msg = self.failure_handler(results)
            return response.text(
                body=msg,
                status=self.failure_status,
                headers=self.failure_headers,
            )
|
/sanic_healthcheck-0.1.1.tar.gz/sanic_healthcheck-0.1.1/sanic_healthcheck/health.py
| 0.894993 | 0.361052 |
health.py
|
pypi
|
import socket
from . import defaults as defs
def is_valid_ipv4(ip_str):
    """
    Check the validity of an IPv4 address
    """
    try:
        socket.inet_pton(socket.AF_INET, ip_str)
        return True
    except socket.error:
        return False
    except AttributeError:  # noqa
        # Platform lacks inet_pton: fall back on the legacy API, or False.
        try:
            socket.inet_aton(ip_str)
        except (AttributeError, socket.error):
            return False
        # inet_aton accepts short forms like "127.1"; require a full
        # dotted quad.
        return ip_str.count(".") == 3
def is_valid_ipv6(ip_str):
    """
    Check the validity of an IPv6 address
    """
    try:
        socket.inet_pton(socket.AF_INET6, ip_str)
        return True
    except socket.error:
        return False
def is_valid_ip(ip_str):
    """
    Check the validity of an IP address (either IPv4 or IPv6)
    """
    if is_valid_ipv4(ip_str):
        return True
    return is_valid_ipv6(ip_str)
def is_private_ip(ip_str):
    """
    Return True if ip_str is private & not routable, else False
    """
    # IPv4 private ranges are matched by regex; for IPv6 the address is
    # compared against known prefixes (presumably tuples of prefix
    # strings — startswith accepts a tuple).
    if defs.IPWARE_IPV4_REGEX.match(ip_str) is not None:
        return True
    private_prefixes = (
        defs.IPWARE_PRIVATE_IPV6_PREFIX + defs.IPWARE_LOOPBACK_PREFIX
    )
    return ip_str.startswith(private_prefixes)
def is_public_ip(ip_str):
    """
    Return True if ip_str is public & routable, else False
    """
    private = is_private_ip(ip_str)
    return not private
def is_loopback_ip(ip_str):
    """
    Returns true if ip_str is a loopback address, else return false
    """
    return ip_str.startswith(defs.IPWARE_LOOPBACK_PREFIX)
def get_request_header(request, header):
    """Return the stripped value of *header* from request.headers, or
    None when the header is absent or blank."""
    raw = request.headers.get(header, "")
    cleaned = raw.strip()
    return cleaned if cleaned else None
def get_ips_from_string(ip_str):
    """Split a comma-separated header value into candidate IP strings.

    Returns ``(ips, count)``; returns ``([], 0)`` when the value is empty
    or its first/last entry does not parse as a valid IP address.
    """
    candidates = [piece.strip().lower() for piece in ip_str.split(",")]
    candidates = [c for c in candidates if c]
    if candidates and is_valid_ip(candidates[0]) and is_valid_ip(candidates[-1]):
        return candidates, len(candidates)
    return [], 0
def get_ip_info(ip_str):
    """Return ``(ip, routable)``: the validated IP (or None) plus whether
    it is publicly routable."""
    if not is_valid_ip(ip_str):
        return None, False
    return ip_str, is_public_ip(ip_str)
def get_best_ip(last_ip, next_ip):
    """Pick the better of two candidate IPs.

    Precedence is Public > Private > Loopback > None; on an equal rank
    the right-most (next) candidate wins.
    """
    if last_ip is None:
        return next_ip
    # Keep the earlier candidate only when it outranks the newer one.
    keep_last = (
        (is_public_ip(last_ip) and not is_public_ip(next_ip))
        or (is_private_ip(last_ip) and is_loopback_ip(next_ip))
    )
    return last_ip if keep_last else next_ip
__all__ = (
"is_valid_ipv4",
"is_valid_ipv6",
"is_valid_ip",
"is_private_ip",
"is_public_ip",
"is_loopback_ip",
"get_request_header",
"get_ips_from_string",
"get_ip_info",
"get_best_ip",
)
|
/sanic-ipware-0.1.0.tar.gz/sanic-ipware-0.1.0/src/sanic_ipware/utils.py
| 0.662906 | 0.233794 |
utils.py
|
pypi
|
import typing as t
from . import defaults as defs
from . import utils as util
def get_client_ip(
    request: object,
    proxy_order: str = "left-most",
    proxy_count: t.Optional[int] = None,
    proxy_trusted_ips: t.Optional[t.List[str]] = None,
    request_header_order: t.Optional[
        t.Union[t.List[str], t.Tuple[str]]
    ] = None,
) -> t.Tuple[t.Optional[str], bool]:
    """Best-effort extraction of the client IP from request headers.

    Walks the headers in precedence order and returns ``(ip, routable)``,
    where ``ip`` may be None and ``routable`` says whether it is public.
    A public match is returned immediately; otherwise the best non-public
    candidate seen across all headers is returned at the end.
    """
    client_ip = None
    routable = False
    if proxy_count is None:
        proxy_count = -1  # -1 means "any number of proxies is acceptable"
    if proxy_trusted_ips is None:
        proxy_trusted_ips = []
    if request_header_order is None:
        request_header_order = defs.IPWARE_META_PRECEDENCE_ORDER
    for header in request_header_order:
        value = util.get_request_header(request, header)
        if value:
            ips, ip_count = util.get_ips_from_string(value)
            if ip_count < 1:
                # we are expecting at least one IP address to process
                continue
            if proxy_count == 0 and ip_count > 1:
                # we are not expecting requests via any proxies
                continue
            if proxy_count > 0 and proxy_count != ip_count - 1:
                # we are expecting requests via `proxy_count` number of proxies
                continue
            if proxy_trusted_ips and ip_count < 2:
                # we are expecting requests via at least one trusted proxy
                continue
            if proxy_order == "right-most" and ip_count > 1:
                # we are expecting requests via proxies to be custom as per
                # `<proxy2>, <proxy1>, <client>`
                ips.reverse()
            if proxy_trusted_ips:
                for proxy in proxy_trusted_ips:
                    # NOTE(review): substring containment, not equality —
                    # "10.1" also matches "210.1.2.3"; confirm intended.
                    if proxy in ips[-1]:
                        client_ip, routable = util.get_ip_info(ips[0])
                        if client_ip and routable:
                            return client_ip, routable
            else:
                # Track the best candidate so far; bail out early only on
                # a public (routable) address.
                client_ip, routable = util.get_ip_info(
                    util.get_best_ip(client_ip, ips[0])
                )
                if client_ip and routable:
                    return client_ip, routable
    return client_ip, routable
|
/sanic-ipware-0.1.0.tar.gz/sanic-ipware-0.1.0/src/sanic_ipware/ipware.py
| 0.447943 | 0.263457 |
ipware.py
|
pypi
|
from enum import Enum
from .predicate import Predicate
from .directions import Directions as Di
from .objects import Objects as Ob
from .transports import Transports as Tr
__all__ = [
'Predicates',
]
class Predicates(Enum):
    """Pre-built routing predicates.

    Each member wraps a Predicate built from three sets: accepted
    directions (incoming/outgoing), transports (post/ws) and JSON-RPC
    object kinds (request/response/notification). ``any`` accepts every
    combination; more specific members narrow one or more of the sets.
    """
    any = Predicate({Di.incoming, Di.outgoing}, {Tr.post, Tr.ws}, {Ob.request, Ob.response, Ob.notification})
    incoming = Predicate({Di.incoming}, {Tr.post, Tr.ws}, {Ob.request, Ob.response, Ob.notification})
    outgoing = Predicate({Di.outgoing}, {Tr.post, Tr.ws}, {Ob.request, Ob.response, Ob.notification})
    post = Predicate({Di.incoming, Di.outgoing}, {Tr.post}, {Ob.request, Ob.response, Ob.notification})
    ws = Predicate({Di.incoming, Di.outgoing}, {Tr.ws}, {Ob.request, Ob.response, Ob.notification})
    request = Predicate({Di.incoming, Di.outgoing}, {Tr.post, Tr.ws}, {Ob.request})
    response = Predicate({Di.incoming, Di.outgoing}, {Tr.post, Tr.ws}, {Ob.response})
    notification = Predicate({Di.incoming, Di.outgoing}, {Tr.post, Tr.ws}, {Ob.notification})
    incoming_post = Predicate({Di.incoming}, {Tr.post}, {Ob.request, Ob.response, Ob.notification})
    incoming_ws = Predicate({Di.incoming}, {Tr.ws}, {Ob.request, Ob.response, Ob.notification})
    outgoing_post = Predicate({Di.outgoing}, {Tr.post}, {Ob.request, Ob.response, Ob.notification})
    outgoing_ws = Predicate({Di.outgoing}, {Tr.ws}, {Ob.request, Ob.response, Ob.notification})
    incoming_request = Predicate({Di.incoming}, {Tr.post, Tr.ws}, {Ob.request})
    incoming_notification = Predicate({Di.incoming}, {Tr.post, Tr.ws}, {Ob.notification})
    outgoing_response = Predicate({Di.outgoing}, {Tr.post, Tr.ws}, {Ob.response})
    outgoing_notification = Predicate({Di.outgoing}, {Tr.post, Tr.ws}, {Ob.notification})
    incoming_post_request = Predicate({Di.incoming}, {Tr.post}, {Ob.request})
    incoming_post_notification = Predicate({Di.incoming}, {Tr.post}, {Ob.notification})
    incoming_ws_request = Predicate({Di.incoming}, {Tr.ws}, {Ob.request})
    incoming_ws_notification = Predicate({Di.incoming}, {Tr.ws}, {Ob.notification})
    outgoing_post_response = Predicate({Di.outgoing}, {Tr.post}, {Ob.response})
    outgoing_post_notification = Predicate({Di.outgoing}, {Tr.post}, {Ob.notification})
    outgoing_ws_response = Predicate({Di.outgoing}, {Tr.ws}, {Ob.response})
    outgoing_ws_notification = Predicate({Di.outgoing}, {Tr.ws}, {Ob.notification})
|
/sanic_jsonrpc-0.4.0-py3-none-any.whl/sanic_jsonrpc/_middleware/predicates.py
| 0.525612 | 0.211946 |
predicates.py
|
pypi
|
from calendar import timegm
from datetime import datetime
from functools import wraps
from typing import Dict, List
from sanic import Sanic
from sanic.request import Request
from sanic_jwt_extended.exceptions import (
AccessDenied,
ConfigurationConflictError,
FreshTokenRequired,
InvalidHeaderError,
NoAuthorizationError,
WrongTokenError,
)
from sanic_jwt_extended.tokens import Token, decode_jwt
async def get_jwt_data(app: Sanic, token: str) -> Dict:
    """
    Decode an encoded JWT using the extension's settings.
    :param app: A Sanic application
    :param token: Encoded JWT string to decode
    :return: Dictionary containing contents of the JWT
    """
    if app.config.JWT_ALGORITHM.startswith("HS"):
        # Symmetric (HMAC) algorithms verify with the shared secret.
        secret = app.config.JWT_SECRET_KEY
    else:
        # Asymmetric algorithms verify with the public key.
        secret = app.config.JWT_PUBLIC_KEY
    return await decode_jwt(
        encoded_token=token,
        secret=secret,
        algorithm=app.config.JWT_ALGORITHM,
        identity_claim_key=app.config.JWT_IDENTITY_CLAIM,
        user_claims_key=app.config.JWT_USER_CLAIMS,
    )
async def get_jwt_data_in_request_header(app: Sanic, request: Request) -> Dict:
    """
    Extract and decode the JWT from the configured request header.

    Raises NoAuthorizationError when the header is missing, and
    InvalidHeaderError when the header is malformed.
    :param app: A Sanic application
    :param request: Sanic request object that contains app
    :return: Dictionary containing contents of the JWT
    """
    header_name: str = app.config.JWT_HEADER_NAME
    header_type: str = app.config.JWT_HEADER_TYPE
    token_header = request.headers.get(header_name)
    if not token_header:
        raise NoAuthorizationError("Missing {} Header".format(header_name))
    parts: List[str] = token_header.split()
    if not header_type:
        # Expected form: "<JWT>"
        if len(parts) != 1:
            raise InvalidHeaderError(
                "Bad {} header. Expected value '<JWT>'".format(header_name)
            )
        token = parts[0]
    else:
        # Expected form: "<type> <JWT>" (e.g. "Bearer <JWT>")
        if parts[0] != header_type or len(parts) != 2:
            raise InvalidHeaderError(
                "Bad {} header. Expected value '{} <JWT>'".format(
                    header_name, header_type
                )
            )
        token = parts[1]
    return await get_jwt_data(app, token)
async def verify_jwt_data_type(token_data: dict, token_type: str) -> None:
    """
    Ensure the decoded token has the expected "type" claim.

    Raises WrongTokenError when the token's type differs from *token_type*
    (e.g. a refresh token presented where an access token is required).
    """
    actual_type = token_data["type"]
    if actual_type != token_type:
        raise WrongTokenError("Only {} tokens are allowed".format(token_type))
def access_control(role=None, allow=None, deny=None):
    """Raise AccessDenied unless *role* passes the allow/deny rule.

    With a non-empty allow-list the role must appear in it; otherwise the
    role must be absent from the deny-list.
    """
    if allow:
        permitted = role in allow
    else:
        permitted = role not in deny
    if not permitted:
        raise AccessDenied("role {0} is not allowed to access".format(role))
def _get_request(*args):
    """
    Pull the Sanic Request out of a handler's positional args.

    Plain function handlers receive the request first; class-based views
    receive it second (after ``self``).
    """
    first = args[0]
    if isinstance(first, Request):
        return first
    return args[1]
def jwt_required(function=None, allow=None, deny=None):
    """
    A decorator to protect a Sanic endpoint.
    If you decorate an endpoint with this, it will ensure that the requester
    has a valid access token before allowing the endpoint to be called.
    and if token check passed this will insert Token object to kwargs,
    This does not check the freshness of the access token.
    See also: :func:`~sanic_jwt_extended.fresh_jwt_required`
    """
    def actual_jwt_required(fn):
        @wraps(fn)
        async def wrapper(*args, **kwargs):
            request = _get_request(*args)
            app = request.app
            # Raises if the header is missing/malformed or the JWT invalid.
            token = await get_jwt_data_in_request_header(app, request)
            await verify_jwt_data_type(token, "access")
            try:
                if allow:
                    access_control(token["role"], allow=allow)
                elif deny:
                    access_control(token["role"], deny=deny)
            except KeyError:
                # No "role" claim: the token was issued without RBAC enabled.
                raise ConfigurationConflictError("Please enable RBAC")
            kwargs["token"] = Token(app, token)
            return await fn(*args, **kwargs)
        return wrapper
    # Bare usage (@jwt_required) receives the function directly; otherwise
    # return the parameterized decorator after validating the options.
    if function:
        return actual_jwt_required(function)
    else:
        if allow and deny:
            raise ConfigurationConflictError(
                "Can not use 'deny' and 'allow' option together."
            )
        return actual_jwt_required
def jwt_optional(fn):
    """
    A decorator to optionally protect a Sanic endpoint
    If an access token in present in the request, this will insert filled Token object to kwargs.
    If no access token is present in the request, this will insert Empty Token object to kwargs
    If there is an invalid access token in the request (expired, tampered with,
    etc), this will still call the appropriate error handler instead of allowing
    the endpoint to be called as if there is no access token in the request. and also does not check role
    """
    @wraps(fn)
    async def wrapper(*args, **kwargs):
        # Default to an empty payload so a missing header still yields a
        # (hollow) Token object for the endpoint.
        token = {}
        request = _get_request(*args)
        app = request.app
        try:
            token = await get_jwt_data_in_request_header(app, request)
            await verify_jwt_data_type(token, "access")
        except (NoAuthorizationError, InvalidHeaderError):
            # Absent/malformed header is acceptable here; any other JWT
            # error (expired, bad signature, ...) still propagates.
            pass
        kwargs["token"] = Token(app, token)
        return await fn(*args, **kwargs)
    return wrapper
def fresh_jwt_required(function=None, allow=None, deny=None):
    """
    A decorator to protect a Sanic endpoint.
    If you decorate an endpoint with this, it will ensure that the requester
    has a valid and fresh access token before allowing the endpoint to be
    called.
    See also: :func:`~sanic_jwt_extended.jwt_required`
    """
    def actual_fresh_jwt_required(fn):
        @wraps(fn)
        async def wrapper(*args, **kwargs):
            request = _get_request(*args)
            app = request.app
            token = await get_jwt_data_in_request_header(app, request)
            await verify_jwt_data_type(token, "access")
            # "fresh" is either a bool or a UTC timestamp (timedelta-based
            # freshness is encoded as an absolute expiry at token creation).
            fresh = token["fresh"]
            if isinstance(fresh, bool):
                if not fresh:
                    raise FreshTokenRequired("Fresh token required")
            else:
                now = timegm(datetime.utcnow().utctimetuple())
                if fresh < now:
                    raise FreshTokenRequired("Fresh token required")
            try:
                if allow:
                    access_control(token["role"], allow=allow)
                elif deny:
                    access_control(token["role"], deny=deny)
            except KeyError:
                # No "role" claim: the token was issued without RBAC enabled.
                raise ConfigurationConflictError("Please enable RBAC")
            kwargs["token"] = Token(app, token)
            return await fn(*args, **kwargs)
        return wrapper
    # Bare usage passes the function directly; otherwise validate options
    # and return the parameterized decorator.
    if function:
        return actual_fresh_jwt_required(function)
    else:
        if allow and deny:
            raise ConfigurationConflictError(
                "Can not use 'deny' and 'allow' option together."
            )
        return actual_fresh_jwt_required
def jwt_refresh_token_required(fn):
    """
    A decorator to protect a Sanic endpoint.
    If you decorate an endpoint with this, it will ensure that the requester
    has a valid refresh token before allowing the endpoint to be called.
    """
    @wraps(fn)
    async def decorated(*args, **kwargs):
        request = _get_request(*args)
        app = request.app
        jwt_data = await get_jwt_data_in_request_header(app, request)
        await verify_jwt_data_type(jwt_data, "refresh")
        kwargs["token"] = Token(app, jwt_data)
        return await fn(*args, **kwargs)
    return decorated
|
/Sanic_JWT_Extended-0.4.4-py3-none-any.whl/sanic_jwt_extended/decorators.py
| 0.884819 | 0.181227 |
decorators.py
|
pypi
|
async def create_access_token(
    app, identity, user_claims=None, role=None, fresh=False, expires_delta=None
):
    """
    Create a new access token.
    :param app: A Sanic application from request object
    :param identity: The identity of this token, which can be any data that is
                     json serializable. It can also be a python object
    :param user_claims: User made claims that will be added to this token. it
                        should be dictionary.
    :param role: A role field for RBAC
    :param fresh: If this token should be marked as fresh, and can thus access
                  :func:`~sanic_jwt_extended.fresh_jwt_required` endpoints.
                  Defaults to `False`. This value can also be a
                  `datetime.timedelta` in which case it will indicate how long
                  this token will be considered fresh.
    :param expires_delta: A `datetime.timedelta` for how long this token should
                          last before it expires. Set to False to disable
                          expiration. If this is None, it will use the
                          'JWT_ACCESS_TOKEN_EXPIRES` config value
    :return: An encoded access token
    """
    # Delegates to the JWTManager instance — presumably attached to the app
    # as `app.jwt` during extension initialization; confirm in the extension.
    return await app.jwt._create_access_token(
        app, identity, user_claims, role, fresh, expires_delta
    )
async def create_refresh_token(app, identity, user_claims=None, expires_delta=None):
    """
    Create a new refresh token.
    :param app: A Sanic application from request object
    :param identity: The identity of this token, which can be any data that is
                     json serializable. It can also be a python object
    :param user_claims: User made claims that will be added to this token. it
                        should be dictionary.
    :param expires_delta: A `datetime.timedelta` for how long this token should
                          last before it expires. Set to False to disable
                          expiration. If this is None, it will use the
                          'JWT_REFRESH_TOKEN_EXPIRES` config value
    :return: An encoded refresh token
    """
    # Delegates to the JWTManager instance — presumably attached to the app
    # as `app.jwt` during extension initialization; confirm in the extension.
    return await app.jwt._create_refresh_token(
        app, identity, user_claims, expires_delta
    )
|
/Sanic_JWT_Extended-0.4.4-py3-none-any.whl/sanic_jwt_extended/utils.py
| 0.837852 | 0.276295 |
utils.py
|
pypi
|
import datetime
import uuid
from calendar import timegm
from typing import Callable, Dict, Union
import jwt
from sanic import Sanic
from sanic_jwt_extended.exceptions import JWTDecodeError
def _encode_jwt(
    additional_token_data: dict,
    expires_delta: datetime.timedelta,
    secret: str,
    algorithm: str,
    json_encoder: Callable[..., str],
) -> str:
    """Build and sign a JWT, returning it as a utf-8 string.

    Always sets fresh ``iat``/``nbf`` timestamps and a random ``jti``;
    *additional_token_data* is merged on top and may override them.
    """
    uid = str(uuid.uuid4())
    now = datetime.datetime.utcnow()
    token_data = {"iat": now, "nbf": now, "jti": uid}
    # If expires_delta is False, the JWT should never expire
    # and the 'exp' claim is not set.
    if expires_delta:
        token_data["exp"] = now + expires_delta
    token_data.update(additional_token_data)
    # NOTE(review): .decode() assumes jwt.encode returns bytes, which is
    # PyJWT 1.x behavior; PyJWT >= 2.0 returns str and this would raise
    # AttributeError — confirm the pinned PyJWT version.
    encoded_token = jwt.encode(
        token_data, secret, algorithm, json_encoder=json_encoder
    ).decode("utf-8")
    return encoded_token
async def encode_access_token(
    identity: str,
    secret: str,
    algorithm: str,
    expires_delta: datetime.timedelta,
    fresh: Union[datetime.timedelta, bool],
    user_claims: dict,
    role: str,
    identity_claim_key: str,
    user_claims_key: str,
    json_encoder: Callable[..., str] = None,
) -> str:
    """Create a new encoded (utf-8) access token.

    :param identity: Identifier for who this token is for (ex, username).
                     This data must be json serializable
    :param secret: Secret key to encode the JWT with
    :param algorithm: Which algorithm to encode this JWT with
    :param expires_delta: How far in the future this token should expire
                          (set to False to disable expiration)
    :type expires_delta: datetime.timedelta or False
    :param fresh: If this should be a 'fresh' token or not. If a
                  datetime.timedelta is given this will indicate how long
                  this token will remain fresh.
    :param user_claims: Custom claims to include in this token. This data
                        must be json serializable
    :param role: A role field for RBAC
    :param identity_claim_key: Which key should be used to store the identity
    :param user_claims_key: Which key should be used to store the user claims
    :param json_encoder: json encoder
    :return: Encoded access token
    """
    if isinstance(fresh, datetime.timedelta):
        # Timedelta freshness becomes an absolute UTC timestamp.
        fresh_until = datetime.datetime.utcnow() + fresh
        fresh = timegm(fresh_until.utctimetuple())
    token_data = {identity_claim_key: identity, "fresh": fresh, "type": "access"}
    # Keep the token minimal: only add optional claims that are present.
    if user_claims:
        token_data[user_claims_key] = user_claims
    if role:
        token_data["role"] = role
    return _encode_jwt(
        token_data, expires_delta, secret, algorithm, json_encoder=json_encoder
    )
async def encode_refresh_token(
    identity,
    secret,
    algorithm,
    expires_delta,
    user_claims,
    identity_claim_key,
    user_claims_key,
    json_encoder=None,
):
    """Create a new encoded (utf-8) refresh token.

    :param identity: Some identifier used to identify the owner of this token
    :param secret: Secret key to encode the JWT with
    :param algorithm: Which algorithm to use for the token
    :param expires_delta: How far in the future this token should expire
                          (set to False to disable expiration)
    :type expires_delta: datetime.timedelta or False
    :param user_claims: Custom claims to include in this token. This data
                        must be json serializable
    :param identity_claim_key: Which key should be used to store the identity
    :param user_claims_key: Which key should be used to store the user claims
    :param json_encoder: json encoder
    :return: Encoded refresh token
    """
    token_data = {identity_claim_key: identity, "type": "refresh"}
    # Keep the token minimal when there are no custom claims.
    if user_claims:
        token_data[user_claims_key] = user_claims
    return _encode_jwt(
        token_data, expires_delta, secret, algorithm, json_encoder=json_encoder
    )
async def decode_jwt(
    encoded_token: str,
    secret: str,
    algorithm: str,
    identity_claim_key: str,
    user_claims_key: str,
) -> Dict:
    """
    Decodes an encoded JWT
    :param encoded_token: The encoded JWT string to decode
    :param secret: Secret key used to encode the JWT
    :param algorithm: Algorithm used to encode the JWT
    :param identity_claim_key: expected key that contains the identity
    :param user_claims_key: expected key that contains the user claims
    :return: Dictionary containing contents of the JWT
    :raises JWTDecodeError: when a required custom claim is missing/invalid
    """
    # This call verifies the exp, iat, and nbf claims; restricting
    # `algorithms` to the single configured algorithm.
    data: dict = jwt.decode(encoded_token, secret, algorithms=[algorithm])
    # Make sure that any custom claims we expect in the token are present
    if "jti" not in data:
        raise JWTDecodeError("Missing claim: jti")
    if identity_claim_key not in data:
        raise JWTDecodeError("Missing claim: {}".format(identity_claim_key))
    if "type" not in data or data["type"] not in ("refresh", "access"):
        raise JWTDecodeError("Missing or invalid claim: type")
    if data["type"] == "access":
        if "fresh" not in data:
            raise JWTDecodeError("Missing claim: fresh")
    # Normalize: guarantee the user-claims key exists for downstream code.
    if user_claims_key not in data:
        data[user_claims_key] = {}
    return data
class Token:
    """
    Decoded-token wrapper handed to endpoint handlers via kwargs.
    """
    data: dict  # raw decoded JWT payload
    app: Sanic  # application, used to look up claim-key configuration
    def __init__(self, app: Sanic, token: dict):
        self.app = app
        self.data = token
    @property
    def raw_jwt(self) -> dict:
        """
        :return: the full decoded JWT payload as a dictionary
        """
        return self.data
    @property
    def jwt_identity(self) -> Union[str, None]:
        """
        :return: the identity claim value, or None when absent
        """
        return self.data.get(self.app.config.JWT_IDENTITY_CLAIM)
    @property
    def jwt_user_claims(self) -> Dict:
        """
        :return: the custom user-claims dict (empty when absent)
        """
        return self.data.get(self.app.config.JWT_USER_CLAIMS, {})
    @property
    def jti(self) -> str:
        """
        :return: the unique token id, or None when absent
        """
        return self.data.get("jti")
|
/Sanic_JWT_Extended-0.4.4-py3-none-any.whl/sanic_jwt_extended/tokens.py
| 0.876727 | 0.317717 |
tokens.py
|
pypi
|
from sanic_jwt import utils
def normalize(scope):
    """
    Split a scope string into ``(namespace, [actions])``.

    ``"user:read:write"`` -> ``("user", ["read", "write"])``.
    """
    namespace, *actions = scope.split(":")
    return (namespace, actions)
def validate_single_scope(
    required, user_scopes, require_all_actions=True, override=None
):
    """Check one required scope against the user's scopes.

    A user scope satisfies the requirement when its namespace matches (an
    empty required namespace matches anything) and it grants the required
    actions (a user scope with no actions grants every action). When
    *override* is callable it receives the preliminary outcome and may
    replace it.
    """
    if not user_scopes:
        return False
    if None in user_scopes:
        remaining = [s for s in user_scopes if s is not None]
        if not remaining:
            return False
        user_scopes = remaining
    req_ns, req_actions = normalize(required)
    candidates = [normalize(s) for s in user_scopes]
    is_valid = False
    for cand_ns, cand_actions in candidates:
        ns_ok = req_ns == cand_ns if req_ns else True
        if req_actions:
            if not cand_actions:
                # A bare namespace grants every action under it.
                actions_ok = True
            else:
                combine = all if require_all_actions else any
                actions_ok = combine(a in cand_actions for a in req_actions)
        else:
            actions_ok = not cand_actions
        is_valid = ns_ok and actions_ok
        if is_valid:
            break
    if callable(override):
        return override(
            is_valid, (req_ns, req_actions), candidates, require_all_actions
        )
    return is_valid
async def validate_scopes(
    request,
    scopes,
    user_scopes,
    override,
    destructure,
    require_all=True,
    require_all_actions=True,
    request_args=None,
    request_kwargs=None,
):
    """Validate the user's scopes against the required scopes.

    *destructure* and *scopes* may be (async) callables; both are resolved
    through ``utils.call`` before checking. Returns True when the required
    scopes pass — all of them when *require_all*, otherwise any.

    :param request: the incoming request, forwarded to callable scopes
    :param scopes: required scope(s) or a callable producing them
    :param user_scopes: the scopes held by the requester
    :param override: optional callable overriding each single-scope outcome
    :param destructure: callable used to unwrap the scopes definition
    """
    # Fix: use None sentinels instead of mutable default arguments
    # ([] / {}), which are shared across calls.
    if request_args is None:
        request_args = []
    if request_kwargs is None:
        request_kwargs = {}
    scopes = await utils.call(destructure, scopes)
    scopes = await utils.call(scopes, request, *request_args, **request_kwargs)
    if not isinstance(scopes, (list, tuple)):
        scopes = [scopes]
    method = all if require_all else any
    return method(
        validate_single_scope(
            x,
            user_scopes,
            require_all_actions=require_all_actions,
            override=override,
        )
        for x in scopes
    )
|
/sanic-jwt-1.8.0.tar.gz/sanic-jwt-1.8.0/sanic_jwt/validators.py
| 0.563618 | 0.323073 |
validators.py
|
pypi
|
from sanic.exceptions import SanicException
from sanic.exceptions import Unauthorized as SanicUnauthorized
class SanicJWTException(SanicException):
    """Base class for every Sanic JWT error; subclasses set status_code
    and a default human-readable message."""
    pass
class InvalidToken(SanicJWTException):
    pass
class AuthenticationFailed(SanicJWTException):
    status_code = 401
    def __init__(self, message="Authentication failed.", **kwargs):
        super().__init__(message, **kwargs)
class MissingAuthorizationHeader(SanicJWTException):
    status_code = 400
    def __init__(self, message="Authorization header not present.", **kwargs):
        super().__init__(message, **kwargs)
class MissingAuthorizationCookie(SanicJWTException):
    status_code = 400
    def __init__(self, message="Authorization cookie not present.", **kwargs):
        super().__init__(message, **kwargs)
class MissingAuthorizationQueryArg(SanicJWTException):
    status_code = 400
    def __init__(
        self, message="Authorization query argument not present.", **kwargs
    ):
        super().__init__(message, **kwargs)
class InvalidAuthorizationHeader(SanicJWTException):
    status_code = 400
    def __init__(self, message="Authorization header is invalid.", **kwargs):
        super().__init__(message, **kwargs)
class InvalidCustomClaim(SanicJWTException):
    status_code = 500
    def __init__(self, message="Custom claim is invalid.", **kwargs):
        super().__init__(message, **kwargs)
class InvalidCustomClaimError(SanicJWTException):
    status_code = 401
    def __init__(self, message="Custom claim value was not met.", **kwargs):
        super().__init__(message, **kwargs)
class InvalidVerification(SanicJWTException):
    status_code = 500
    def __init__(
        self,
        message="Verifications must be a callable object "
        "returning a boolean value.",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class InvalidVerificationError(SanicJWTException):
    status_code = 401
    def __init__(self, message="Verifications were not met.", **kwargs):
        super().__init__(message, **kwargs)
class AuthenticateNotImplemented(SanicJWTException):
    status_code = 500
    def __init__(
        self,
        message="Sanic JWT initialized without providing an authenticate "
        "method.",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class RefreshTokenNotImplemented(SanicJWTException):
    status_code = 500
    def __init__(
        self, message="Refresh tokens have not been enabled.", **kwargs
    ):
        super().__init__(message, **kwargs)
class ScopesNotImplemented(SanicJWTException):
    status_code = 500
    def __init__(
        self,
        message="Scopes have not been enabled. Initialize with "
        "add_scopes_to_payload to provide scoping.",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class UserSecretNotImplemented(SanicJWTException):
    status_code = 500
    def __init__(
        self, message="User secrets have not been enabled.", **kwargs
    ):
        super().__init__(message, **kwargs)
class MissingRegisteredClaim(SanicJWTException):
    status_code = 500
    def __init__(
        self,
        message="One or more claims have been registered, but your "
        "extend_payload() method does not supply them. ",
        missing=None,
        **kwargs
    ):
        if missing:  # noqa
            message += str(missing)
        super().__init__(message, **kwargs)
class MeEndpointNotSetup(SanicJWTException):
    status_code = 500
    def __init__(
        self,
        # Fix: original message read "if you with to proceeed".
        message="/me endpoint has not been setup. Pass retrieve_user if "
        "you wish to proceed.",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class InvalidRetrieveUserObject(SanicJWTException):
    status_code = 500
    def __init__(
        self,
        message="The retrieve_user method should return either a dict or "
        "an object with a to_dict or __json__ method.",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class InitializationFailure(SanicJWTException):
    status_code = 500
    def __init__(
        self,
        message="Sanic JWT was not initialized properly. It must be "
        "instantiated on a sanic.Sanic or sanic.Blueprint "
        "instance.",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class Unauthorized(SanicJWTException, SanicUnauthorized):
    def __init__(self, message="Auth required.", **kwargs):
        super().__init__(message, scheme="Bearer", **kwargs)
class InvalidClassViewsFormat(SanicJWTException):
    def __init__(
        self,
        message="class_views should follow this format ('<SOME ROUTE>', "
        "ClassInheritedFromBaseEndpoint)",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class InvalidConfiguration(SanicJWTException):
    def __init__(self, message="", **kwargs):
        message = (
            "An invalid setting was passed to the Sanic JWT "
            "configuration: " + str(message)
        )
        super().__init__(message, **kwargs)
class InvalidPayload(SanicJWTException):
    status_code = 500
    def __init__(self, message="", **kwargs):
        # NOTE(review): the incoming `message` argument is discarded and
        # replaced with the fixed text below — presumably intentional,
        # but confirm (InvalidConfiguration, by contrast, appends it).
        message = (
            "Payload must be a dictionary with a key mapped to "
            "SANIC_JWT_USER_ID"
        )
        super().__init__(message, **kwargs)
class RequiredKeysNotFound(SanicJWTException):
    def __init__(
        self,
        message="You must provide both (valid) SANIC_JWT_PUBLIC_KEY and "
        "SANIC_JWT_PRIVATE_KEY when using asymmetric "
        "cryptographic algorithms like RS*, EC* or PS*",
        **kwargs
    ):
        super().__init__(message, **kwargs)
class ProvidedPathNotFound(SanicJWTException):
    def __init__(
        self, message="The Path object given is not a valid file", **kwargs
    ):
        super().__init__(message, **kwargs)
class LoopNotRunning(SanicJWTException):
    def __init__(
        self, message="The asyncio loop is not currently running", **kwargs
    ):
        super().__init__(message, **kwargs)
|
/sanic-jwt-1.8.0.tar.gz/sanic-jwt-1.8.0/sanic_jwt/exceptions.py
| 0.821903 | 0.203252 |
exceptions.py
|
pypi
|
import copy
from oauthlib.common import unicode_type
__all__ = ['douban', 'dropbox', 'facebook', 'github', 'google', 'linkedin', 'twitter', 'weibo']
class RemoteAppFactory(object):
    """Factory that creates a remote app and binds it to a given extension.

    :param default_name: the default name used when registering.
    :param kwargs: the pre-defined kwargs for the remote app.
    :param docstring: the docstring attached to the factory instance.
    """
    def __init__(self, default_name, kwargs, docstring=''):
        assert 'name' not in kwargs
        assert 'register' not in kwargs
        self.default_name = default_name
        self.kwargs = kwargs
        self._kwargs_processor = None
        self.__doc__ = docstring.lstrip()
    def register_to(self, oauth, name=None, **kwargs):
        """Create a remote app and register it on *oauth*."""
        effective = self._process_kwargs(
            name=(name or self.default_name), **kwargs
        )
        return oauth.remote_app(**effective)
    def create(self, oauth, **kwargs):
        """Create a remote app without registering it."""
        effective = self._process_kwargs(
            name=self.default_name, register=False, **kwargs
        )
        return oauth.remote_app(**effective)
    def kwargs_processor(self, fn):
        """Install *fn* to post-process kwargs before creating any app."""
        self._kwargs_processor = fn
        return fn
    def _process_kwargs(self, **kwargs):
        # Deep-copy so callers can never mutate the shared defaults;
        # caller-supplied kwargs win over the pre-defined ones.
        merged = copy.deepcopy(self.kwargs)
        merged.update(copy.deepcopy(kwargs))
        # Derive the config app key from the name unless given explicitly.
        merged.setdefault('app_key', merged['name'].upper())
        if self._kwargs_processor is not None:
            merged = self._kwargs_processor(**merged)
        return merged
def make_scope_processor(default_scope):
    """Build a kwargs processor that defaults the OAuth ``scope``.

    The returned processor pops ``scope`` (falling back to
    *default_scope*), joins list-style scopes with commas, and stores the
    result under ``request_token_params`` without overriding an explicit
    value already present there.
    """
    def processor(**kwargs):
        # request_token_params
        scope = kwargs.pop('scope', [default_scope])  # default scope
        if not isinstance(scope, (unicode_type, bytes)):
            scope = ','.join(scope)  # allows list-style scope
        request_token_params = kwargs.setdefault('request_token_params', {})
        request_token_params.setdefault('scope', scope)  # doesn't override
        return kwargs
    return processor
douban = RemoteAppFactory(
'douban',
{
'base_url': 'https://api.douban.com/v2/',
'request_token_url': None,
'access_token_url': 'https://www.douban.com/service/auth2/token',
'authorize_url': 'https://www.douban.com/service/auth2/auth',
'access_token_method': 'POST',
},
"""
The OAuth app for douban.com API.
:param scope: optional. default: ``['douban_basic_common']``.
see also: http://developers.douban.com/wiki/?title=oauth2
""",
)
douban.kwargs_processor(make_scope_processor('douban_basic_common'))
dropbox = RemoteAppFactory(
'dropbox',
{
'base_url': 'https://www.dropbox.com/1/',
'request_token_url': None,
'access_token_url': 'https://api.dropbox.com/1/oauth2/token',
'authorize_url': 'https://www.dropbox.com/1/oauth2/authorize',
'access_token_method': 'POST',
'request_token_params': {},
},
"""The OAuth app for Dropbox API.""",
)
facebook = RemoteAppFactory(
'facebook',
{
'request_token_params': {'scope': 'email'},
'base_url': 'https://graph.facebook.com',
'request_token_url': None,
'access_token_url': '/oauth/access_token',
'authorize_url': 'https://www.facebook.com/dialog/oauth',
},
"""
The OAuth app for Facebook API.
:param scope: optional. default: ``['email']``.
""",
)
facebook.kwargs_processor(make_scope_processor('email'))
github = RemoteAppFactory(
'github',
{
'base_url': 'https://api.github.com/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://github.com/login/oauth/access_token',
'authorize_url': 'https://github.com/login/oauth/authorize',
},
"""
The OAuth app for GitHub API.
:param scope: optional. default: ``['user:email']``.
""",
)
github.kwargs_processor(make_scope_processor('user:email'))
google = RemoteAppFactory(
'google',
{
'base_url': 'https://www.googleapis.com/oauth2/v1/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
},
"""
The OAuth app for Google API.
:param scope: optional.
default: ``['email']``.
""",
)
google.kwargs_processor(make_scope_processor('email'))
twitter = RemoteAppFactory(
'twitter',
{
'base_url': 'https://api.twitter.com/1.1/',
'request_token_url': 'https://api.twitter.com/oauth/request_token',
'access_token_url': 'https://api.twitter.com/oauth/access_token',
'authorize_url': 'https://api.twitter.com/oauth/authenticate',
},
"""The OAuth app for Twitter API.""",
)
weibo = RemoteAppFactory(
'weibo',
{
'base_url': 'https://api.weibo.com/2/',
'authorize_url': 'https://api.weibo.com/oauth2/authorize',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://api.weibo.com/oauth2/access_token',
# since weibo's response is a shit, we need to force parse the content
'content_type': 'application/json',
},
"""
The OAuth app for weibo.com API.
:param scope: optional. default: ``['email']``
""",
)
weibo.kwargs_processor(make_scope_processor('email'))
def change_weibo_header(uri, headers, body):
    """Rewrite the Authorization header for Weibo.

    Weibo does not follow RFC 6750: it expects the ``OAuth2`` keyword
    instead of the standard ``Bearer`` prefix, so the header is rewritten
    in place before the request is sent.
    """
    current = headers.get('Authorization')
    if current:
        headers['Authorization'] = current.replace('Bearer', 'OAuth2')
    return uri, headers, body
weibo.pre_request = change_weibo_header
linkedin = RemoteAppFactory(
'linkedin',
{
'request_token_params': {'state': 'RandomString'},
'base_url': 'https://api.linkedin.com/v1/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://www.linkedin.com/uas/oauth2/accessToken',
'authorize_url': 'https://www.linkedin.com/uas/oauth2/authorization',
},
"""
The OAuth app for LinkedIn API.
:param scope: optional. default: ``['r_basicprofile']``
""",
)
linkedin.kwargs_processor(make_scope_processor('r_basicprofile'))
def change_linkedin_query(uri, headers, body):
    """Move the OAuth2 access token from the header into the query string.

    LinkedIn's v1 API does not accept the standard ``Bearer`` Authorization
    header; it expects an ``oauth2_access_token`` query parameter and a
    JSON format hint header instead.

    :param uri: request URI; the token is appended as a query parameter.
    :param headers: request headers; ``Authorization`` is removed and
        ``x-li-format`` is forced to ``json``.
    :param body: request body, passed through unchanged.
    :return: the ``(uri, headers, body)`` triple with the auth relocated.
    """
    # Use a default so a request without an Authorization header does not
    # raise KeyError (the previous unconditional pop crashed such requests).
    auth = headers.pop('Authorization', None)
    headers['x-li-format'] = 'json'
    if auth:
        token = auth.replace('Bearer', '').strip()
        separator = '&' if '?' in uri else '?'
        uri += separator + 'oauth2_access_token=' + token
    return uri, headers, body
# Install the LinkedIn-specific request rewriter defined above.
linkedin.pre_request = change_linkedin_query
|
/Sanic_OAuthlib-0.4.0-py3-none-any.whl/sanic_oauthlib/contrib/apps.py
| 0.721939 | 0.219452 |
apps.py
|
pypi
|
import inspect
import warnings
import yaml
class OpenAPIDocstringParser:
    """Base class for parsers that turn a handler docstring into an
    OpenAPI path description."""

    def __init__(self, docstring: str):
        """
        Args:
            docstring (str): docstring of function to be parsed
        """
        # Treat a missing docstring as empty and strip the common leading
        # indentation so subsequent parsing sees normalized text.
        self.docstring = inspect.cleandoc(docstring or "")

    def to_openAPI_2(self) -> dict:
        """
        Returns:
            json style dict: dict to be read for the path by swagger 2.0 UI
        """
        raise NotImplementedError()

    def to_openAPI_3(self) -> dict:
        """
        Returns:
            json style dict: dict to be read for the path by swagger 3.0.0 UI
        """
        raise NotImplementedError()
class YamlStyleParametersParser(OpenAPIDocstringParser):
    """Parses docstrings whose trailing ``openapi:`` section is YAML."""

    def _parse_no_yaml(self, doc: str) -> dict:
        """
        Args:
            doc (str): section of doc before yaml, or full section of doc
        Returns:
            json style dict: dict to be read for the path by swagger UI
        """
        # clean again in case further indentation can be removed,
        # usually this does nothing...
        doc = inspect.cleandoc(doc)
        if len(doc) == 0:
            return {}
        lines = doc.split("\n")
        if len(lines) == 1:
            return {"summary": lines[0]}
        summary = lines.pop(0)
        # remove empty lines at the beginning of the description
        while len(lines) and lines[0].strip() == "":
            lines.pop(0)
        if len(lines) == 0:
            return {"summary": summary}
        # use html tag to preserve linebreaks
        return {"summary": summary, "description": "<br>".join(lines)}

    def _parse_yaml(self, doc: str) -> dict:
        """
        Args:
            doc (str): section of doc detected as openapi yaml
        Returns:
            json style dict: dict to be read for the path by swagger UI
        Warns:
            UserWarning if the yaml couldn't be parsed or does not
            describe a mapping
        """
        try:
            loaded = yaml.safe_load(doc)
        except Exception as e:
            warnings.warn(
                "error parsing openAPI yaml, ignoring it. ({})".format(e)
            )
            return {}
        # safe_load returns None for an empty document and may return a
        # scalar or a list for other input; only a mapping can be merged
        # into the path configuration by _parse_all (previously anything
        # else crashed conf.update()).
        if not isinstance(loaded, dict):
            warnings.warn(
                "openAPI yaml section does not describe a mapping, "
                "ignoring it."
            )
            return {}
        return loaded

    def _parse_all(self) -> dict:
        # Everything before the first "openapi:" line is summary/description;
        # the remainder is parsed as YAML and merged on top.
        if "openapi:\n" not in self.docstring:
            return self._parse_no_yaml(self.docstring)
        predoc, yamldoc = self.docstring.split("openapi:\n", 1)
        conf = self._parse_no_yaml(predoc)
        conf.update(self._parse_yaml(yamldoc))
        return conf

    def to_openAPI_2(self) -> dict:
        return self._parse_all()

    def to_openAPI_3(self) -> dict:
        return self._parse_all()
|
/sanic-openapi-21.12.0.tar.gz/sanic-openapi-21.12.0/sanic_openapi/autodoc.py
| 0.610337 | 0.331931 |
autodoc.py
|
pypi
|
import json
import typing as t
from datetime import date, datetime, time
from enum import Enum
from inspect import isclass
from typing import Any, Dict, List, Optional, Union, get_type_hints
class Definition:
    """Base for all OpenAPI spec objects: validates kwargs against the
    declared (annotated) properties and serializes to plain dicts."""
    # Backing store for the accepted keyword arguments.
    __fields: dict
    # Controls which falsy fields survive serialize(): [] (the default)
    # keeps every field, None drops all falsy fields, and a non-empty
    # list keeps only the listed field names when falsy.
    __nullable__: Optional[List[str]] = []
    def __init__(self, **kwargs):
        # Only keep kwargs that guard() accepts.
        self.__fields = self.guard(kwargs)
    @property
    def fields(self):
        return self.__fields
    def guard(self, fields):
        # Accept only declared properties plus OpenAPI extension keys
        # ("x-..."); everything else is silently dropped.
        return {
            k: v
            for k, v in fields.items()
            if k in _properties(self).keys() or k.startswith("x-")
        }
    def serialize(self):
        # Emit truthy fields always; falsy fields only per __nullable__
        # (see the attribute comment above).
        return {
            k: v
            for k, v in _serialize(self.fields).items()
            if (
                v
                or (
                    isinstance(self.__nullable__, list)
                    and (not self.__nullable__ or k in self.__nullable__)
                )
            )
        }
    def __str__(self):
        return json.dumps(self.serialize())
    def apply(self, func, operations, *args, **kwargs):
        """Apply this definition to the operation registered for *func*.

        Looks up the operation builder method named after this class
        (or an explicit ``__method__`` override) and calls it with this
        instance's attributes when no explicit args are given.
        """
        op = operations[func]
        method_name = getattr(
            self.__class__, "__method__", self.__class__.__name__.lower()
        )
        method = getattr(op, method_name)
        if not args and not kwargs:
            kwargs = self.__dict__
        method(*args, **kwargs)
class Schema(Definition):
    """OpenAPI Schema object; Schema.make() converts arbitrary Python
    types and values into the matching Schema subclass."""
    title: str
    description: str
    type: str
    format: str
    nullable: bool
    required: bool
    default: None
    example: None
    oneOf: List[Definition]
    anyOf: List[Definition]
    allOf: List[Definition]
    multipleOf: int
    maximum: int
    exclusiveMaximum: bool
    minimum: int
    exclusiveMinimum: bool
    maxLength: int
    minLength: int
    pattern: str
    enum: Union[List[Any], Enum]
    @staticmethod
    def make(value, **kwargs):
        """Build a Schema from *value*.

        *value* may be an existing Schema (returned as-is), a plain type
        (mapped to the matching subclass), or a concrete value (mapped to
        the matching subclass, usually with the value as default).
        """
        if isinstance(value, Schema):
            return value
        # First branch set: *value* is itself a type object.
        if value == bool:
            return Boolean(**kwargs)
        elif value == int:
            return Integer(**kwargs)
        elif value == float:
            return Float(**kwargs)
        elif value == str:
            return String(**kwargs)
        elif value == bytes:
            return Byte(**kwargs)
        elif value == bytearray:
            return Binary(**kwargs)
        elif value == date:
            return Date(**kwargs)
        elif value == time:
            return Time(**kwargs)
        elif value == datetime:
            return DateTime(**kwargs)
        # Second branch set: *value* is a concrete instance; its type
        # decides the subclass and (for primitives) it becomes the default.
        _type = type(value)
        if _type == bool:
            return Boolean(default=value, **kwargs)
        elif _type == int:
            return Integer(default=value, **kwargs)
        elif _type == float:
            return Float(default=value, **kwargs)
        elif _type == str:
            return String(default=value, **kwargs)
        elif _type == bytes:
            return Byte(default=value, **kwargs)
        elif _type == bytearray:
            return Binary(default=value, **kwargs)
        elif _type == date:
            # NOTE(review): unlike the primitive branches above, the
            # date/time/datetime values are not recorded as defaults —
            # confirm whether that is intentional.
            return Date(**kwargs)
        elif _type == time:
            return Time(**kwargs)
        elif _type == datetime:
            return DateTime(**kwargs)
        elif _type == list:
            if len(value) == 0:
                schema = Schema(nullable=True)
            elif len(value) == 1:
                schema = Schema.make(value[0])
            else:
                schema = Schema(oneOf=[Schema.make(x) for x in value])
            return Array(schema, **kwargs)
        elif _type == dict:
            return Object.make(value, **kwargs)
        elif _type == t._GenericAlias and value.__origin__ == list:
            # typing.List[X] style generics; presumably builtin list[X]
            # generics (3.9+) fall through to Object.make — TODO confirm.
            return Array(Schema.make(value.__args__[0]), **kwargs)
        else:
            return Object.make(value, **kwargs)
# Primitive schema subclasses: each pins the OpenAPI "type" (and, where
# applicable, "format") and forwards any other keyword arguments.
class Boolean(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="boolean", **kwargs)
class Integer(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="integer", format="int32", **kwargs)
class Long(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="integer", format="int64", **kwargs)
class Float(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="number", format="float", **kwargs)
class Double(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="number", format="double", **kwargs)
class String(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", **kwargs)
class Byte(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", format="byte", **kwargs)
class Binary(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", format="binary", **kwargs)
class Date(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", format="date", **kwargs)
class Time(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", format="time", **kwargs)
class DateTime(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", format="date-time", **kwargs)
class Password(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", format="password", **kwargs)
class Email(Schema):
    def __init__(self, **kwargs):
        super().__init__(type="string", format="email", **kwargs)
class Object(Schema):
    """Schema of type "object" with a property map."""
    properties: Dict[str, Schema]
    maxProperties: int
    minProperties: int
    def __init__(self, properties: Dict[str, Schema] = None, **kwargs):
        super().__init__(type="object", properties=properties or {}, **kwargs)
    @classmethod
    def make(cls, value: Any, **kwargs):
        # Build the property map from the value's annotations/attributes.
        return cls(
            {k: Schema.make(v) for k, v in _properties(value).items()},
            **kwargs,
        )
class Array(Schema):
    """Schema of type "array"; *items* is coerced via Schema.make."""
    items: Any
    maxItems: int
    minItems: int
    uniqueItems: bool
    def __init__(self, items: Any, **kwargs):
        super().__init__(type="array", items=Schema.make(items), **kwargs)
def _serialize(value) -> Any:
    """Recursively convert *value* into plain JSON-compatible data.

    Definitions serialize themselves, Enum classes collapse to the list
    of their member values, dicts and lists are walked recursively, and
    anything else is returned untouched.
    """
    if isinstance(value, Definition):
        return value.serialize()
    if isinstance(value, type) and issubclass(value, Enum):
        return [member.value for member in value.__members__.values()]
    if isinstance(value, dict):
        serialized = {}
        for key, item in value.items():
            serialized[key] = _serialize(item)
        return serialized
    if isinstance(value, list):
        return list(map(_serialize, value))
    return value
def _properties(value: object) -> Dict:
try:
fields = {x: v for x, v in value.__dict__.items()}
except AttributeError:
fields = {}
cls = value if isclass(value) else value.__class__
return {
k: v
for k, v in {**get_type_hints(cls), **fields}.items()
if not k.startswith("_")
}
|
/sanic-openapi-21.12.0.tar.gz/sanic-openapi-21.12.0/sanic_openapi/openapi3/types.py
| 0.797596 | 0.234483 |
types.py
|
pypi
|
from typing import Any, Dict, List, Optional, Type, Union
from .types import Definition, Schema
class Reference(Schema):
    """A ``$ref`` pointer to another spec component."""
    def __init__(self, value):
        super().__init__(**{"$ref": value})
    def guard(self, fields: Dict[str, Any]):
        # Bypass property filtering: "$ref" is not a declared property
        # and would otherwise be dropped by Definition.guard.
        return fields
# Contact information for the API (all fields optional).
class Contact(Definition):
    name: str
    url: str
    email: str
# License information; the name is mandatory per the OpenAPI spec.
class License(Definition):
    name: str
    url: str
    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
# Top-level API metadata; title and version are mandatory.
class Info(Definition):
    title: str
    description: str
    termsOfService: str
    contact: Contact
    license: License
    version: str
    def __init__(self, title: str, version: str, **kwargs):
        super().__init__(title=title, version=version, **kwargs)
class Example(Definition):
    """An example value, inline (``value``) or by URL (``externalValue``)."""
    summary: str
    description: str
    value: Any
    externalValue: str
    def __init__(self, value: Any = None, **kwargs):
        super().__init__(value=value, **kwargs)
    @staticmethod
    def make(value: Any, **kwargs):
        return Example(value, **kwargs)
    @staticmethod
    def external(value: Any, **kwargs):
        return Example(externalValue=value, **kwargs)
class MediaType(Definition):
    """A single media type entry (schema plus optional example)."""
    schema: Schema
    example: Any
    def __init__(self, schema: Schema, **kwargs):
        super().__init__(schema=schema, **kwargs)
    @staticmethod
    def make(value: Any):
        return MediaType(Schema.make(value))
    @staticmethod
    def all(content: Any):
        # A bare value is treated as the wildcard media type "*/*".
        media_types = (
            content if isinstance(content, dict) else {"*/*": content or {}}
        )
        return {x: MediaType.make(v) for x, v in media_types.items()}
class Response(Definition):
    """A response for one HTTP status code."""
    content: Union[Any, Dict[str, Union[Any, MediaType]]]
    description: Optional[str]
    # NOTE(review): annotated str but the default (and make()) pass an
    # int status — confirm which type downstream consumers expect.
    status: str
    def __init__(
        self,
        content: Optional[Union[Any, Dict[str, Union[Any, MediaType]]]] = None,
        status: int = 200,
        description: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(
            content=content, status=status, description=description, **kwargs
        )
    @staticmethod
    def make(content, description: str = None, **kwargs):
        if not description:
            description = "Default Response"
        return Response(
            MediaType.all(content), description=description, **kwargs
        )
class RequestBody(Definition):
    """The request body of an operation."""
    description: Optional[str]
    required: Optional[bool]
    content: Union[Any, Dict[str, Union[Any, MediaType]]]
    def __init__(
        self,
        content: Union[Any, Dict[str, Union[Any, MediaType]]],
        required: Optional[bool] = None,
        description: Optional[str] = None,
        **kwargs,
    ):
        """Can be initialized with content in one of a few ways:
        RequestBody(SomeModel)
        RequestBody({"application/json": SomeModel})
        RequestBody({"application/json": {"name": str}})
        """
        super().__init__(
            content=content,
            required=required,
            description=description,
            **kwargs,
        )
    @staticmethod
    def make(content: Any, **kwargs):
        return RequestBody(MediaType.all(content), **kwargs)
class ExternalDocumentation(Definition):
    """A link to external documentation."""
    url: str
    description: str
    def __init__(self, url: str, description=None):
        super().__init__(url=url, description=description)
    @staticmethod
    def make(url: str, description: str = None):
        return ExternalDocumentation(url, description)
class Header(Definition):
    # NOTE(review): this looks like a copy-paste of ExternalDocumentation.
    # The constructor accepts ``url`` but the declared properties are
    # name/description/externalDocs, so Definition.guard silently drops
    # ``url`` — confirm the intended Header shape against the spec.
    name: str
    description: str
    externalDocs: ExternalDocumentation
    def __init__(self, url: str, description=None):
        super().__init__(url=url, description=description)
    @staticmethod
    def make(url: str, description: str = None):
        return Header(url, description)
class Parameter(Definition):
    """A single operation parameter (query/path/header/cookie)."""
    name: str
    schema: Union[Type, Schema]
    # Stored as "location" because "in" is a reserved word in Python;
    # renamed back to "in" on serialization (see fields below).
    location: str
    description: Optional[str]
    required: Optional[bool]
    deprecated: Optional[bool]
    allowEmptyValue: Optional[bool]
    # Drop all falsy fields from the serialized output (see Definition).
    __nullable__ = None
    def __init__(
        self,
        name: str,
        schema: Union[Type, Schema],
        location: str = "query",
        description: Optional[str] = None,
        required: Optional[bool] = None,
        deprecated: Optional[bool] = None,
        allowEmptyValue: Optional[bool] = None,
        **kwargs,
    ):
        super().__init__(
            name=name,
            schema=schema,
            location=location,
            description=description,
            required=required,
            deprecated=deprecated,
            allowEmptyValue=allowEmptyValue,
            **kwargs,
        )
    @property
    def fields(self):
        values = super().fields
        if "location" in values:
            values["in"] = values.pop("location")
        return values
    @staticmethod
    def make(name: str, schema: type, location: str, **kwargs):
        # Path parameters are always required per the OpenAPI spec.
        if location == "path":
            kwargs["required"] = True
        return Parameter(name, Schema.make(schema), location, **kwargs)
# A single API operation (one HTTP method on a path).
class Operation(Definition):
    tags: List[str]
    summary: str
    description: str
    operationId: str
    requestBody: RequestBody
    externalDocs: ExternalDocumentation
    parameters: List[Parameter]
    responses: Dict[str, Response]
    security: Dict[str, List[str]]
    callbacks: List[str]  # TODO
    deprecated: bool
# All operations available on a single path, keyed by HTTP method.
class PathItem(Definition):
    summary: str
    description: str
    get: Operation
    put: Operation
    post: Operation
    delete: Operation
    options: Operation
    head: Operation
    patch: Operation
    trace: Operation
class SecurityScheme(Definition):
    """A security scheme (apiKey/http/oauth2/openIdConnect)."""
    type: str
    description: str
    scheme: str
    bearerFormat: str
    name: str
    # Stored as "location" ("in" is reserved in Python); renamed on
    # serialization, same trick as Parameter.
    location: str
    openIdConnectUrl: str
    def __init__(self, type: str, **kwargs):
        super().__init__(type=type, **kwargs)
    @property
    def fields(self):
        values = super().fields
        if "location" in values:
            values["in"] = values.pop("location")
        return values
    @staticmethod
    def make(_type: str, cls: Type, **kwargs):
        # Pull scheme attributes off the supplied class; guard() filters
        # out anything that is not a declared property.
        params = cls.__dict__ if hasattr(cls, "__dict__") else {}
        return SecurityScheme(_type, **params, **kwargs)
class ServerVariable(Definition):
    """A variable used for server URL templating."""
    default: str
    description: str
    enum: List[str]
    def __init__(self, default: str, **kwargs):
        super().__init__(default=default, **kwargs)
class Server(Definition):
    """A single server hosting the API.

    :param url: base URL of the server.
    :param description: optional human-readable description.
    :param variables: optional mapping of variable name to ServerVariable
        used for URL templating.
    """
    url: str
    description: str
    variables: Dict[str, ServerVariable]
    def __init__(
        self, url: str, description: str = None, variables: dict = None
    ):
        super().__init__(
            # ``variables`` is declared as a mapping; default to {} rather
            # than [] so an empty value serializes with the correct type.
            url=url, description=description, variables=variables or {}
        )
# A tag used to group operations in the documentation UI.
class Tag(Definition):
    name: str
    description: str
    externalDocs: ExternalDocumentation
    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
class Components(Definition):
    # This class is not being used in sanic-openapi right now, but the
    # definition is kept here to keep in close accordance with the openapi
    # spec, in case it is desired to be added later.
    schemas: Dict[str, Schema]
    responses: Dict[str, Response]
    parameters: Dict[str, Parameter]
    examples: Dict[str, Example]
    requestBodies: Dict[str, RequestBody]
    headers: Dict[str, Header]
    securitySchemes: Dict[str, SecurityScheme]
    links: Dict[str, Schema]  # TODO
    callbacks: Dict[str, Schema]  # TODO
class OpenAPI(Definition):
    """The root document object; pins the spec version to 3.0.0."""
    openapi: str
    info: Info
    servers: List[Server]
    paths: Dict[str, PathItem]
    components: Components
    security: Dict[str, SecurityScheme]
    tags: List[Tag]
    externalDocs: ExternalDocumentation
    def __init__(self, info: Info, paths: Dict[str, PathItem], **kwargs):
        super().__init__(openapi="3.0.0", info=info, paths=paths, **kwargs)
|
/sanic-openapi-21.12.0.tar.gz/sanic-openapi-21.12.0/sanic_openapi/openapi3/definitions.py
| 0.888858 | 0.273185 |
definitions.py
|
pypi
|
from functools import partial
from typing import Any, NamedTuple, Optional
from . import doc
# The classes defined in this file are deprecated, and will be removed
# An appropriate warning is raised below
class Response(NamedTuple):
    """
    HTTP status code - returned object model pair with optional description.
    If `model` is a class that has a docstring, then its docstring will be
    used as description if `description` is not set.
    """

    code: int
    model: Any
    description: Optional[str] = None
class API:
    """
    Decorator factory class for documenting routes using `sanic_openapi` and
    optionally registering them in a `sanic` application or blueprint.
    Supported class attribute names match the corresponding
    `sanic_openapi.doc` decorator's name and attribute values work exactly as
    if they were passed to the given decorator unless explicitly documented
    otherwise. The supported class attributes (all of which are optional) are
    as follows:
    - `summary`: Its value should be the short summary of the route. If
        neither `summary` nor `description` is specified, then the first
        paragraph of the API class' documentation will be used instead.
        You may also set it to `None` to disable automatic `summary` and
        `description` generation.
    - `description`: A longer description of the route. If neither
        `summary` nor `description` is specified, then the API class'
        documentation will be used except its first paragraph that serves
        as the default summary. You may also set it to `None` to disable
        automatic `summary` and `description` generation.
    - `exclude`: Whether to exclude the route (and related models) from
        the API documentation.
    - `consumes`: The model of the data the API route consumes. If
        `consumes` is a class that has a docstring, then the docstring
        will be used as the description of th data.
    - `consumes_content_type`: The content type of the data the API route
        consumes.
    - `consumes_location`: The location where the data is expected
        (`query` or `body`).
    - `consumes_required`: Whether the consumed data is required.
    - `produces`: The model of the data the API route produces.
    - `produces_content_type`: The content type of the data the API
        route produces.
    - `produces_description`: The description of the data the API
        route produces. If not specified but `produces` is a class that
        has a docstring, then the docstring will be used as the
        default description.
    - `response`: A `Response` instance or a sequence of `Response`
        instances that describe the route's response for different HTTP
        status codes. The value of the `produces` attribute corresponds
        to HTTP 200, you don't have to specify that here.
    - `tag`: The tags/groups the API route belongs to.
    Example:
    ```Python
    class JSONConsumerAPI(API):
        consumes_content_type = "application/json"
        consumes_location = "body"
        consumes_required = True
    class JSONProducerAPI(API):
        produces_content_type = "application/json"
    class MyAPI(JSONConsumerAPI, JSONProducerAPI):
        \"\"\"
        Route *summary* in first paragraph.
        First paragraph of route *description*.
        Second paragraph of route *description*.
        \"\"\"
        class consumes:
            foo = str
            bar = str
        class produces:
            result = bool
    # Document and register the route at once.
    @MyAPI.post(app, "/my_route")
    def my_route(request: Request):
        return {"result": True}
    # Or simply document a route.
    @app.post("/my_route")
    @MyAPI
    def my_route(request: Request):
        return {"result": True}
    ```
    Additionally, you may specify a `decorators` class attribute, whose value
    must be a sequence of decorators to apply on the decorated routes. These
    decorators will be applied *before* the `sanic_openapi` decorators - and
    the `sanic` routing decorators if the routing decorators provided by this
    class are used - in *reverse* order. It means that the following cases
    are equivalent:
    ```Python
    class Data(API):
        class consumes:
            stg = str
    class DecoratedData(Data):
        decorators = (first, second)
    @DecoratedData.get(app, "/data")
    def data_all_in_one(request: Request):
        return "data"
    @app.get("/data")
    @DecoratedData
    def data_doc_and_decorators_in_one(request: Request):
        return "data"
    @Data.get(app, "/data")
    @first
    @second
    def data_routing_and_doc_in_one(request: Request):
        return "data"
    @app.get("/data")
    @Data
    @first
    @second
    def data(request: Request):
        return "data"
    ```
    It is possible to override all the described class attributes on a per
    decorator basis simply by passing the desired custom value to the decorator
    as a keyword argument:
    ```Python
    class JSONConsumerAPI(API):
        consumes_content_type = "application/json"
        consumes_location = "body"
        consumes_required = True
        class consumes:
            foo = str
            bar = str
    # The consumed data is required.
    @JSONConsumerAPI.post(app, "/data")
    def data(request: Request):
        return "data"
    # The consumed data is optional.
    @app.post("/data_optional")
    @JSONConsumerAPI(consumes_required=False)
    def data_consumed_not_required(request: Request):
        return "data"
    ```
    """

    # Sentinel distinguishing "attribute not set" from an explicit None.
    __MISSING = "__MISSING"

    def __new__(cls, func=None, **kwargs):
        """
        Decorator that automaticaly documents the decorated route and returns
        the decorated method.
        Arguments:
            func: The decorated request handler function.
        """
        import warnings

        warnings.warn(
            "sanic_openapi.api.API has been marked as deprecated, and may be "
            "removed in 0.6.4. \n If you are using this class, please leave "
            "an issue in https://github.com/sanic-org/sanic-openapi/issues",
            UserWarning,
        )
        # Called as `@API(...)` with keyword overrides: return a decorator.
        if func is None:
            return partial(cls, **kwargs)

        def get_attribute(obj, name, default):
            """
            Specialized attribute getter that checks every attribute name in
            `kwargs` first to allow inline overrides of attributes.
            Arguments:
                obj: The object to get the attribute value from.
                name: The name of the attribute to look up.
                default: The default value to return if the `name` attribute
                    doesn't exist.
            """
            return (
                kwargs[name] if name in kwargs else getattr(obj, name, default)
            )

        # The _add_decorators() call must precede everything else.
        func = cls._add_decorators(func, get_attribute)
        func = cls._add_base_data(func, get_attribute)
        func = cls._add_consumes(func, get_attribute)
        func = cls._add_produces(func, get_attribute)
        func = cls._add_responses(func, get_attribute)
        func = cls._add_tags(func, get_attribute)
        return func

    @classmethod
    def _add_base_data(cls, func, get_attribute):
        """
        Adds basic route documentation such as summary and description.
        Arguments:
            func: The decorated request handler function.
            get_attribute: Attribute getter function to use.
        """
        summary = get_attribute(cls, "summary", cls.__MISSING)
        description = get_attribute(cls, "description", cls.__MISSING)
        # If there was no explicit summary or description, determine them from
        # the class documentation if that exists.
        if (
            summary == cls.__MISSING
            and description == cls.__MISSING
            and cls.__doc__
        ):
            class_doc_parts = cls.__doc__.strip().split("\n\n")
            if len(class_doc_parts) > 0:
                summary = class_doc_parts[0].strip()
            if len(class_doc_parts) > 1:
                # Preserve paragraphs.
                description = "<br><br>".join(
                    part.strip() for part in class_doc_parts[1:]
                )
        return doc.route(
            summary=summary if summary != cls.__MISSING else None,
            description=description if description != cls.__MISSING else None,
            exclude=cls._exclude(get_attribute),
        )(func)

    @classmethod
    def _add_consumes(cls, func, get_attribute):
        """
        Adds the documentation of the consumed data to the route.
        Arguments:
            func: The decorated request handler function.
            get_attribute: Attribute getter function to use.
        """
        value = get_attribute(cls, "consumes", None)
        # Don't register the consumed model if the route is excluded.
        if value is None or cls._exclude(get_attribute):
            return func
        # If value is a type (class), convert it to a doc.Object to be able to
        # specify its name to avoid model name conflicts and have a more
        # readable doc.
        if isinstance(value, type):
            value = doc.Object(
                value,
                object_name=cls.__name__ + "Consumes",
                description=value.__doc__,
            )
        # Use the same default values as in doc.consumes().
        return doc.consumes(
            value,
            content_type=get_attribute(cls, "consumes_content_type", None),
            location=get_attribute(cls, "consumes_location", "query"),
            required=get_attribute(cls, "consumes_required", False),
        )(func)

    @classmethod
    def _add_decorators(cls, func, get_attribute):
        """
        Adds the custom route decorators from the `decorators` class attribute
        to the route.
        Arguments:
            func: The decorated request handler function.
            get_attribute: Attribute getter function to use.
        """
        decorators = get_attribute(cls, "decorators", None)
        if decorators is not None:
            for decorator in reversed(decorators):
                func = decorator(func)
        return func

    @classmethod
    def _add_produces(cls, func, get_attribute):
        """
        Adds the documentation of the produced data to the route.
        Arguments:
            func: The decorated request handler function.
            get_attribute: Attribute getter function to use.
        """
        value = get_attribute(cls, "produces", None)
        # Don't register the produced model if the route is excluded.
        if value is None or cls._exclude(get_attribute):
            return func
        # If value is a type (class), convert it to a doc.Object to be able to
        # specify its name to avoid model name conflicts and have a more
        # readable doc.
        if isinstance(value, type):
            produces_doc = value.__doc__.strip() if value.__doc__ else None
            produces_description = get_attribute(
                cls, "produces_description", produces_doc
            )
            value = doc.Object(
                value,
                object_name=cls.__name__ + "Produces",
                description=produces_doc,
            )
        else:
            produces_description = get_attribute(
                cls, "produces_description", None
            )
        # Use the same default values as in doc.produces().
        return doc.produces(
            value,
            content_type=get_attribute(cls, "produces_content_type", None),
            description=produces_description,
        )(func)

    @classmethod
    def _add_response(cls, func, response):
        """
        Adds the documentation of the behavior defined by the given `Response`
        instance to the route.
        Arguments:
            func: The decorated request handler function.
            response: The `Response` instance that defines the route's
                behavior.
        """
        description = response.description
        if description is None and isinstance(response.model, type):
            description = (
                response.model.__doc__.strip()
                if response.model.__doc__
                else None
            )
        return doc.response(
            response.code, response.model, description=description
        )(func)

    @classmethod
    def _add_responses(cls, func, get_attribute):
        """
        Adds the documentation of responses corresponding to specific HTTP
        status codes to the route.
        Arguments:
            func: The decorated request handler function.
            get_attribute: Attribute getter function to use.
        """
        response = get_attribute(cls, "response", None)
        if response is None:
            return func
        if isinstance(response, Response):
            return cls._add_response(func, response)
        if isinstance(response, (list, tuple)):
            for item in response:
                func = cls._add_response(func, item)
            return func
        # Fix: previously an unrecognized `response` value made this method
        # fall off the end and return None, silently destroying the handler.
        # Ignore the invalid value instead (consistent with _add_tags).
        return func

    @classmethod
    def _add_tags(cls, func, get_attribute):
        """
        Adds tags to the route.
        Arguments:
            func: The decorated request handler function.
            get_attribute: Attribute getter function to use.
        """
        value = get_attribute(cls, "tag", None)
        if isinstance(value, str):
            func = doc.tag(value)(func)
        elif isinstance(value, (list, tuple)):
            for item in value:
                func = doc.tag(item)(func)
        return func

    @classmethod
    def _exclude(cls, get_attribute):
        """
        Returns whether the route should be excluded from the documentation.
        Arguments:
            get_attribute: Attribute getter function to use.
        """
        return get_attribute(cls, "exclude", None)

    @classmethod
    def delete(cls, app, uri, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its
        API using `sanic_openapi`.
        The decorated method will be registered for `DELETE` requests.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `delete()`
        method as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.delete(uri, **kwargs)(cls(func))
        return inner

    @classmethod
    def get(cls, app, uri, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its API
        using `sanic_openapi`.
        The decorated method will be registered for `GET` requests.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `get()`
        method as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.get(uri, **kwargs)(cls(func))
        return inner

    @classmethod
    def head(cls, app, uri, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its
        API using `sanic_openapi`.
        The decorated method will be registered for `HEAD` requests.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `head()`
        method as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.head(uri, **kwargs)(cls(func))
        return inner

    @classmethod
    def options(cls, app, uri, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its
        API using `sanic_openapi`.
        The decorated method will be registered for `OPTIONS` requests.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `options()`
        method as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.options(uri, **kwargs)(cls(func))
        return inner

    @classmethod
    def patch(cls, app, uri, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its
        API using `sanic_openapi`.
        The decorated method will be registered for `PATCH` requests.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `patch()`
        method as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.patch(uri, **kwargs)(cls(func))
        return inner

    @classmethod
    def post(cls, app, uri, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its
        API using `sanic_openapi`.
        The decorated method will be registered for `POST` requests.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `post()` method
        as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.post(uri, **kwargs)(cls(func))
        return inner

    @classmethod
    def put(cls, app, uri, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its
        API using `sanic_openapi`.
        The decorated method will be registered for `PUT` requests.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `put()`
        method as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.put(uri, **kwargs)(cls(func))
        return inner

    @classmethod
    def route(cls, app, uri, *, methods, **kwargs):
        """
        Decorator that registers the decorated route in the given `sanic`
        application or blueprint with the given URI, and also documents its
        API using `sanic_openapi`.
        Keyword arguments that are not listed in arguments section will be
        passed on to the `sanic` application's or blueprint's `route()`
        method as they are.
        Arguments:
            app: The `sanic` application or blueprint where the route should
                be registered.
            uri: The URI the route should be accessible at.
        """
        def inner(func):
            return app.route(uri, methods=methods, **kwargs)(cls(func))
        return inner
|
/sanic-openapi-21.12.0.tar.gz/sanic-openapi-21.12.0/sanic_openapi/openapi2/api.py
| 0.93734 | 0.730987 |
api.py
|
pypi
|
import json
from datetime import date, datetime, time
from typing import List, Dict, Any, get_type_hints
class Definition:
    """Base class for every OpenAPI definition object.

    Keyword arguments given to the constructor are filtered through
    :meth:`guard` so that only names declared on the class (via annotations
    or class attributes, as reported by ``props``) are retained.
    """

    __fields: dict

    def __init__(self, **kwargs):
        self.__fields = self.guard(kwargs)

    @property
    def fields(self) -> Dict[str, Any]:
        """The guarded keyword arguments this definition was built from."""
        return self.__fields

    def guard(self, fields: Dict[str, Any]) -> Dict[str, Any]:
        """Drop every entry whose key is not a declared property of this class."""
        allowed = props(self).keys()
        return {key: value for key, value in fields.items() if key in allowed}

    def serialize(self):
        """Recursively convert this definition into plain JSON-able data."""
        return serialize(self.fields)

    def __str__(self):
        return json.dumps(self.serialize())
class Schema(Definition):
    """Base OpenAPI Schema object (type/format plus combinators).

    The annotations below define which constructor kwargs survive
    ``Definition.guard`` (it matches on annotation *names* via
    ``typing.get_type_hints``), so each annotation must be a valid type:
    the original ``nullable: False`` literal made ``get_type_hints`` raise
    ``TypeError`` for every ``Schema`` subclass, breaking instantiation.
    """

    type: str
    format: str
    description: str
    nullable: bool  # fixed: was the literal ``False`` (not a valid annotation)
    default: Any    # fixed: was the literal ``None``; a default may be any value
    example: Any    # fixed: was the literal ``None``
    oneOf: List[Definition]
    anyOf: List[Definition]
    allOf: List[Definition]
class Boolean(Schema):
    """Schema for JSON type ``boolean``."""

    def __init__(self, **kwargs):
        super().__init__(type="boolean", **kwargs)


class Integer(Schema):
    """Schema for a 32-bit integer (``integer``/``int32``)."""

    def __init__(self, **kwargs):
        super().__init__(type="integer", format="int32", **kwargs)


class Long(Schema):
    """Schema for a 64-bit integer (``integer``/``int64``)."""

    def __init__(self, **kwargs):
        super().__init__(type="integer", format="int64", **kwargs)


class Float(Schema):
    """Schema for a single-precision number (``number``/``float``)."""

    def __init__(self, **kwargs):
        super().__init__(type="number", format="float", **kwargs)


class Double(Schema):
    """Schema for a double-precision number (``number``/``double``)."""

    def __init__(self, **kwargs):
        super().__init__(type="number", format="double", **kwargs)


class String(Schema):
    """Schema for JSON type ``string``."""

    def __init__(self, **kwargs):
        super().__init__(type="string", **kwargs)


class Byte(Schema):
    """Schema for base64-encoded data (``string``/``byte``)."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="byte", **kwargs)


class Binary(Schema):
    """Schema for raw binary data (``string``/``binary``)."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="binary", **kwargs)


class Date(Schema):
    """Schema for a date (``string``/``date``)."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="date", **kwargs)


class Time(Schema):
    """Schema for a time (``string``/``time``)."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="time", **kwargs)


class DateTime(Schema):
    """Schema for a timestamp (``string``/``date-time``)."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="date-time", **kwargs)


class Password(Schema):
    """Schema for a password field (``string``/``password``)."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="password", **kwargs)


class Email(Schema):
    """Schema for an e-mail address (``string``/``email``)."""

    def __init__(self, **kwargs):
        super().__init__(type="string", format="email", **kwargs)
class Object(Schema):
    """Schema of type ``object`` with named ``properties``."""

    properties: Dict[str, Schema]

    def __init__(self, properties: Dict[str, Schema] = None, **kwargs):
        # ``properties or {}`` also replaces an explicitly-passed empty dict,
        # which is harmless: an empty mapping is equivalent.
        super().__init__(type="object", properties=properties or {}, **kwargs)


class Array(Schema):
    """Schema of type ``array`` whose elements match ``items``."""

    items: Schema

    def __init__(self, items: Schema, **kwargs):
        super().__init__(type="array", items=items, **kwargs)
def serialize(value) -> Any:
    """Recursively turn *value* into plain JSON-serializable data.

    ``Definition`` instances serialize through their own ``serialize()``;
    dicts and lists are walked recursively; anything else passes through
    unchanged.
    """
    if isinstance(value, Definition):
        return value.serialize()
    if isinstance(value, dict):
        return {key: serialize(item) for key, item in value.items()}
    if isinstance(value, list):
        return [serialize(item) for item in value]
    return value
class Contact(Definition):
    """Contact information for the API (OpenAPI Contact object)."""

    name: str
    url: str
    email: str


class License(Definition):
    """License information for the API (OpenAPI License object)."""

    name: str
    url: str

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)


class Info(Definition):
    """Metadata about the API (OpenAPI Info object)."""

    title: str
    description: str
    termsOfService: str
    contact: Contact
    license: License
    version: str

    def __init__(self, title: str, version: str, **kwargs):
        super().__init__(title=title, version=version, **kwargs)


class Example(Definition):
    """An example value (OpenAPI Example object)."""

    summary: str
    description: str
    value: Any
    externalValue: str  # TODO


    def __init__(self, value: Any, **kwargs):
        super().__init__(value=value, **kwargs)


class MediaType(Definition):
    """A media type together with its schema (OpenAPI Media Type object)."""

    schema: Schema
    example: Any

    def __init__(self, schema: Schema, **kwargs):
        super().__init__(schema=schema, **kwargs)


class Response(Definition):
    """A single response from an operation (OpenAPI Response object)."""

    content: Dict[str, MediaType]
    description: str

    def __init__(self, content=None, **kwargs):
        super().__init__(content=content, **kwargs)


class RequestBody(Definition):
    """A request body (OpenAPI Request Body object)."""

    description: str
    required: bool
    content: Dict[str, MediaType]

    def __init__(self, content: Dict[str, MediaType], **kwargs):
        super().__init__(content=content, **kwargs)


class ExternalDocumentation(Definition):
    """A reference to external documentation."""

    name: str
    url: str

    def __init__(self, url: str, **kwargs):
        super().__init__(url=url, **kwargs)
class Parameter(Definition):
    """An operation parameter (OpenAPI Parameter object).

    The spec names the location field ``in`` — a Python keyword — so it is
    stored internally as ``location`` and renamed when the fields are read.
    """

    name: str
    location: str
    description: str
    required: bool
    deprecated: bool
    allowEmptyValue: bool
    schema: Schema

    def __init__(self, name, schema: Schema, location='query', **kwargs):
        super().__init__(name=name, schema=schema, location=location, **kwargs)

    @property
    def fields(self):
        # Bug fix: work on a copy.  ``super().fields`` returns the live
        # internal dict, and popping 'location' from it directly corrupted
        # the stored state — a second access of ``.fields`` raised KeyError.
        values = dict(super().fields)
        values['in'] = values.pop('location')
        return values
class Operation(Definition):
    """A single API operation on a path (OpenAPI Operation object)."""

    tags: List[str]
    summary: str
    description: str
    operationId: str
    requestBody: RequestBody
    externalDocs: ExternalDocumentation
    parameters: List[Parameter]
    responses: Dict[str, Response]
    security: Dict[str, List[str]]
    callbacks: List[str]  # TODO
    deprecated: bool


class PathItem(Definition):
    """The operations available on a single path, keyed by HTTP method."""

    summary: str
    description: str
    get: Operation
    put: Operation
    post: Operation
    delete: Operation
    options: Operation
    head: Operation
    patch: Operation
    trace: Operation


class Components(Definition):
    """Reusable objects for the specification (OpenAPI Components object)."""

    schemas: Dict[str, Schema]
    responses: Dict[str, Response]
    parameters: Dict[str, Parameter]
    examples: Dict[str, Example]
    requestBodies: Dict[str, RequestBody]
    headers: Dict[str, Schema]  # TODO
    securitySchemes: Dict[str, Schema]  # TODO
    links: Dict[str, Schema]  # TODO
    callbacks: Dict[str, Schema]  # TODO


class Tag(Definition):
    """Grouping metadata for operations (OpenAPI Tag object)."""

    name: str
    description: str
    externalDocs: ExternalDocumentation

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)


class OpenAPI(Definition):
    """The root OpenAPI document; pins the spec version to 3.0.0."""

    openapi: str
    info: Info
    # Annotation improved: the original ``servers: []`` (a list *instance*)
    # is not a valid type and makes ``typing.get_type_hints`` raise.
    servers: list  # TODO
    paths: Dict[str, PathItem]
    components: Components
    security: Dict[str, Any]
    tags: List[Tag]
    externalDocs: ExternalDocumentation

    def __init__(self, info: Info, paths: Dict[str, PathItem], **kwargs):
        super().__init__(openapi="3.0.0", info=info, paths=paths, **kwargs)
def props(value: Any) -> Dict[str, Any]:
    """Collect the declared properties of *value*.

    Combines the class's type hints with the instance's public (non-
    underscore) attributes; instance values override the hint entries.
    """
    merged = dict(get_type_hints(value.__class__))
    for attr_name, attr_value in value.__dict__.items():
        if not attr_name.startswith('_'):
            merged[attr_name] = attr_value
    return merged
def scheme(value: Any) -> Schema:
    """Map a Python value or type to an OpenAPI ``Schema``.

    Accepts three kinds of input:
    - an existing ``Schema`` instance, returned unchanged;
    - a *type* object (``bool``, ``int``, ``str``, ``date`` ...), mapped to
      the corresponding Schema subclass;
    - a concrete *value*, mapped to a Schema carrying it as ``default``
      (lists, dicts and arbitrary objects recurse into Array/Object).
    """
    def __recur(fields: Dict):
        # Build schemas for each member of a mapping, recursively.
        return {k: scheme(v) for k, v in fields.items()}

    if isinstance(value, Schema):
        return value

    # First phase: the argument is itself a type object.
    if value == bool:
        return Boolean()
    elif value == int:
        return Integer()
    elif value == float:
        return Float()
    elif value == str:
        return String()
    elif value == bytes:
        return Byte()
    elif value == bytearray:
        return Binary()
    elif value == date:
        return Date()
    elif value == time:
        return Time()
    elif value == datetime:
        return DateTime()

    # Second phase: the argument is a concrete value; carry it as default.
    # ``type(value)`` is compared exactly, so bool does not fall into int.
    _type = type(value)

    if _type == bool:
        return Boolean(default=value)
    elif _type == int:
        return Integer(default=value)
    elif _type == float:
        return Float(default=value)
    elif _type == str:
        return String(default=value)
    elif _type == bytes:
        return Byte(default=value)
    elif _type == bytearray:
        return Binary(default=value)
    elif _type == date:
        # Note: date/time/datetime instances do not carry a default value.
        return Date()
    elif _type == time:
        return Time()
    elif _type == datetime:
        return DateTime()
    elif _type == list:
        if len(value) == 0:
            # Empty list: element type unknown, mark nullable.
            schema = Schema(nullable=True)
        elif len(value) == 1:
            schema = scheme(value[0])
        else:
            # Heterogeneous sample: any of the listed element schemas.
            schema = Schema(oneOf=[scheme(x) for x in value])
        return Array(schema)
    elif _type == dict:
        return Object(__recur(value))
    else:
        # Arbitrary object: build an Object schema from its annotations
        # and public attributes.
        return Object(__recur(props(value)))
def media(value: Any) -> Dict[str, MediaType]:
    """Build a media-type map for *value*.

    A dict is taken to already map content types (e.g. ``"application/json"``)
    to schema values; any other value is exposed under the ``*/*`` wildcard.
    """
    if isinstance(value, dict):
        media_types = value
    else:
        # Bug fix: the original tested ``value is not dict`` — identity
        # against the *type* object — which is true for every real value,
        # so explicit media-type dicts were always re-wrapped under */*.
        media_types = {'*/*': value or {}}
    return {content_type: MediaType(scheme(v)) for content_type, v in media_types.items()}
|
/sanic-openapi3-0.0.2.tar.gz/sanic-openapi3-0.0.2/sanic_openapi3/definitions.py
| 0.764364 | 0.226634 |
definitions.py
|
pypi
|
from .oas_types import * # pylint: disable=unused-wildcard-import, wildcard-import # <<-- here for users
module_tags: Dict[str, Tag] = {}
endpoints: Paths = Paths() # Note: this is really a Dict[Callable, PathItem] under the hood.
def deprecated():
    """Mark a route as deprecated: ``@doc.deprecated()``."""
    def inner(route_handler):
        endpoints[route_handler].x_deprecated_holder = True
        return route_handler
    return inner
def exclude():
    """Hide a route from the generated spec: ``@doc.exclude()``."""
    def inner(route_handler):
        endpoints[route_handler].x_exclude = True
        return route_handler
    return inner
def external_docs(url: str, description: Optional[str] = None):  # pylint: disable=redefined-outer-name
    """Attach an ``externalDocs`` object to the route.

    Note that some UIs do not show route/operation external docs.
    """
    def inner(route_handler):
        docs = ExternalDocumentation(url, description=description)
        endpoints[route_handler].x_external_docs_holder = docs
        return route_handler
    return inner
def description(text: str):
    """Set the route's long description: ``@doc.description("Descriptive text")``."""
    def inner(route_handler):
        endpoints[route_handler].description = text
        return route_handler
    return inner
# noinspection PyShadowingNames
def parameter(  # pylint: disable=too-many-arguments
    name: str,
    _in="query",
    description: Optional[str] = None,  # pylint: disable=redefined-outer-name
    required: Optional[bool] = None,
    deprecated: bool = False,  # pylint: disable=redefined-outer-name
    allow_empty_value: Optional[bool] = None,
    choices: Optional[List] = None,
    style: Optional[str] = None,
    explode: Optional[bool] = None,
    allow_reserved: Optional[bool] = None,
    schema: Optional[Union[Schema, Reference]] = None,
    example: Optional[Any] = None,
    examples: Optional[Dict[str, Union[Example, Reference]]] = None,
    content: Optional[Dict[str, MediaType]] = None,
):
    """Describe a single operation parameter (OpenAPI Parameter Object).

    A unique parameter is a combination of ``name`` and location (``_in``).

    :param name: REQUIRED. Case-sensitive parameter name.  For ``_in="path"``
        it must match a template segment of the path; for ``_in="header"``
        the names "Accept", "Content-Type" and "Authorization" are ignored.
    :param _in: REQUIRED. One of "query", "header", "path" or "cookie".
    :param description: Brief description (CommonMark allowed).
    :param required: Whether the parameter is mandatory.  Forced to True for
        path parameters, per the spec.
    :param deprecated: Marks the parameter as being phased out.
    :param allow_empty_value: Allow empty-valued query parameters
        (NOT RECOMMENDED by the spec).
    :param choices: Becomes the entries for ``schema.enum``.
    :param style: Serialization style (defaults per location: query/cookie ->
        form; path/header -> simple).
    :param explode: Expand array/object values into separate parameters.
    :param allow_reserved: Permit RFC3986 reserved characters unencoded
        (query parameters only).
    :param schema: The schema defining the parameter's type.
    :param example: Example value; mutually exclusive with ``examples``.
    :param examples: Named examples; mutually exclusive with ``example``.
    :param content: Map with a single media-type entry describing the
        parameter representation.
    """
    # Path parameters are always mandatory per the OpenAPI spec.
    if _in == "path":
        required = True

    # ``schema`` may be one of the shared "class static" instances (for
    # example ``Schema.Integer``).  Those must never be mutated, so any
    # modification below is performed on a clone.
    if schema is None:
        if choices is None:
            schema = Schema.String  # A basic default
        else:
            schema = Schema(_type=Schema.get_enum_type(choices), enum=choices)
    elif choices is not None:
        # Both schema and choices were supplied.
        if not isinstance(schema, Schema):
            raise ValueError("Cannot add choices to a Reference schema: define a new one with these choices.")
        schema = schema.clone()
        schema.add_enum(choices)
    assert schema

    def inner(func):
        endpoints[func].parameters.append(
            Parameter(
                name=name,
                _in=_in,
                description=description,
                required=required,
                deprecated=deprecated,
                style=style,
                explode=explode,
                allow_reserved=allow_reserved,
                allow_empty_value=allow_empty_value,
                schema=schema,
                example=example,
                examples=examples,
                content=content,
            )
        )
        return func
    return inner
# noinspection PyShadowingNames
def request_body(
    content: Dict[str, MediaType],
    description: Optional[str] = None,  # pylint: disable=redefined-outer-name
    required: bool = False,
):
    """Describe the operation's request body.

    :param content: REQUIRED. Maps a media type (or range, e.g. ``text/*``)
        to its description; the most specific matching key applies.
    :param description: Brief description (CommonMark allowed).
    :param required: Whether the request body must be present. Default False.
    """
    def inner(func):
        endpoints[func].request_body = RequestBody(
            description=description, required=required, content=content,
        )
        return func
    return inner
# noinspection PyShadowingNames
def response(
    status_code: Union[int, str],
    description: str,
    headers: Optional[Dict[str, Union[Header, Reference]]] = None,
    content: Optional[Dict[str, MediaType]] = None,
    links: Optional[Dict[str, Union[Link, Reference]]] = None,
):  # pylint: disable=redefined-outer-name
    """Document one response, keyed by status code, for the route."""
    def inner(func):
        # Only record an entry when at least one descriptive field was given.
        if any((description, headers, content, links)):
            entry = Response(description=description, headers=headers, content=content, links=links)
            endpoints[func].x_responses_holder[str(status_code)] = entry
        return func
    return inner
def responses(
    container: Union[
        Dict[
            Union[int, str],
            Optional[
                Dict[
                    str,
                    Union[
                        str,
                        Optional[Dict[str, Union[Header, Reference]]],
                        Optional[Dict[str, MediaType]],
                        Optional[Dict[str, Union[Link, Reference]]],
                        Optional[Reference],
                    ],
                ]
            ],
        ],
        Responses,
    ]
):
    """
    A container for the expected responses of an operation. The container maps a HTTP response code to the expected
    response.

    Each status code maps either to ``None`` or to a dict with short keys:
    ``d`` description, ``h`` headers, ``c`` content, ``l`` links, ``r`` a bare
    ``Reference`` (mutually exclusive with the others).

    The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be
    known in advance. However, documentation is expected to cover a successful operation response and any known errors.
    """
    def inner(func):
        if isinstance(container, Responses):
            endpoints[func].x_responses_holder = container
            return func
        for status_code, opt_desc in container.items():
            if not opt_desc:
                endpoints[func].x_responses_holder[str(status_code)] = None
                continue
            d = opt_desc.get("d")  # pylint: disable=invalid-name
            h = opt_desc.get("h")  # pylint: disable=invalid-name  (bug fix: previously read the "c" key)
            c = opt_desc.get("c")  # pylint: disable=invalid-name
            l = opt_desc.get("l")  # pylint: disable=invalid-name
            r = opt_desc.get("r")  # pylint: disable=invalid-name
            if any((d, h, c, l)):
                assert not r, "You cannot combine `Reference`s in this `Response`."
            if r:
                endpoints[func].x_responses_holder[str(status_code)] = r
            elif any((d, h, c, l)):
                endpoints[func].x_responses_holder[str(status_code)] = Response(
                    description=d, headers=h, content=c, links=l
                )
            else:
                endpoints[func].x_responses_holder[str(status_code)] = None
        return func
    return inner
def security(requirements: List[SecurityRequirement]):
    """Set the security schemes required to execute this operation.

    Each requirement name must correspond to a scheme declared under the
    Components Object's Security Schemes.  A single requirement containing
    multiple schemes means ALL of them must be satisfied; when a list of
    requirements is given, satisfying ANY one of them authorizes the request.

    `sanic-openapi3e hint: set your standard security requirements via
    ``app.config.OPENAPI_SECURITY``; then override them as needed with a
    ``@doc.security`` annotation.  Use ``@doc.security([])`` to disable
    security for this route/operation.
    """
    def inner(route_handler):
        endpoints[route_handler].x_security_holder = requirements
        return route_handler
    return inner
def servers(server_list: List[Server]):
    """Attach an alternative ``servers`` array to all operations on this path.

    Note: if the top-level servers are not set (via
    ``app.config.OPENAPI_SERVERS = [doc.Server("http://localhost:8000", "this server")]``)
    then this path-level entry will not be shown in Swagger.
    """
    def inner(route_handler):
        endpoints[route_handler].servers = server_list
        return route_handler
    return inner
def summary(text):
    """Set the route's one-line summary: ``@doc.summary("Summary text")``."""
    def inner(route_handler):
        endpoints[route_handler].summary = text
        return route_handler
    return inner
# noinspection PyShadowingNames
def tag(name, description=None):  # pylint: disable=redefined-outer-name
    """
    Add a tag - which gives Swagger grouping - to the route by marking them `@doc.tag("Tag", "Optional description")`
    """
    def inner(func):
        if name in module_tags:
            if module_tags[name].description:
                if description and module_tags[name].description != description:
                    # Two different non-empty descriptions for the same tag
                    # name: fail loudly rather than silently pick one.
                    # NOTE(review): validation via ``assert`` is stripped
                    # under ``python -O``; raising would be more robust.
                    msg = "Conflicting tag.description for tag `{}`: existing: `{}`, conflicting: `{}`".format(
                        name, module_tags[name].description, description
                    )
                    assert module_tags[name].description == description, msg
            else:
                # Existing tag has no description yet: replace it so this
                # call's description (possibly None) is recorded.
                module_tags[name] = Tag(name=name, description=description)
        else:
            module_tags[name] = Tag(name=name, description=description)
        endpoints[func].x_tags_holder.append(module_tags[name])
        return func
    return inner
|
/sanic_openapi3e-0.9.10-py3-none-any.whl/sanic_openapi3e/doc.py
| 0.851799 | 0.496155 |
doc.py
|
pypi
|
from urllib.parse import quote as _unquote
class ClientIdError(Exception):
    """Raised when the OAuth2 ``client_id`` is missing or invalid."""

    error = "Client ID Error"
    description = "The client identifier (client_id) is missing or invalid."


class RedirectUriError(Exception):
    """Raised for a missing, invalid, or mismatching ``redirect_uri``."""

    error = "Redirect URI Error"
    description = "The request fails due to a missing, invalid, or mismatching redirection URI (redirect_uri)."
class AuthorizeError(Exception):
    """OAuth2 / OpenID Connect authorization endpoint error.

    Maps a known error code to its human-readable description and can render
    itself as a redirect URI carrying the error back to the client.
    """

    _errors = {
        # Oauth2 errors.
        # https://tools.ietf.org/html/rfc6749#section-4.1.2.1
        "invalid_request": "The request is otherwise malformed",
        "unauthorized_client": "The client is not authorized to request an authorization code using this method",
        "access_denied": "The resource owner or authorization server denied the request",
        "unsupported_response_type": "The authorization server does not support obtaining an authorization"
        " code using this method",
        "invalid_scope": "The requested scope is invalid, unknown, or malformed",
        "server_error": "The authorization server encountered an error",
        "temporarily_unavailable": "The authorization server is currently unable to handle the request due "
        "to a temporary overloading or maintenance of the server",
        # OpenID errors.
        # http://openid.net/specs/openid-connect-core-1_0.html#AuthError
        "interaction_required": "The Authorization Server requires End-User interaction of some form to proceed",
        "login_required": "The Authorization Server requires End-User authentication",
        "account_selection_required": "The End-User is required to select a session at the Authorization Server",
        "consent_required": "The Authorization Server requires End-User consent",
        "invalid_request_uri": "The request_uri in the Authorization Request returns an error or contains invalid data",
        "invalid_request_object": "The request parameter contains an invalid Request Object",
        "request_not_supported": "The provider does not support use of the request parameter",
        "request_uri_not_supported": "The provider does not support use of the request_uri parameter",
        "registration_not_supported": "The provider does not support use of the registration parameter",
    }

    def __init__(self, redirect_uri, error, grant_type):
        self.error = error
        # Unknown codes yield description=None.
        self.description = self._errors.get(error)
        self.redirect_uri = redirect_uri
        self.grant_type = grant_type

    def create_uri(self, redirect_uri, state):
        """Build the redirect URI that reports this error to the client."""
        # NOTE(review): despite the alias, ``_unquote`` is bound to
        # ``urllib.parse.quote`` in this module's imports — it percent-ENcodes
        # the description for safe inclusion in the URI.
        description = _unquote(self.description)
        # See:
        # http://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthError
        # Implicit flow reports errors in the fragment, others in the query.
        hash_or_question = "#" if self.grant_type == "implicit" else "?"
        uri = "{0}{1}error={2}&error_description={3}".format(redirect_uri, hash_or_question, self.error, description)
        # Add state if present.
        # NOTE(review): ``state`` is appended unencoded — presumably already
        # URL-safe; confirm against callers.
        uri = uri + ("&state={0}".format(state) if state else "")
        return uri
class TokenError(Exception):
    """
    OAuth2 token endpoint errors.
    https://tools.ietf.org/html/rfc6749#section-5.2
    """

    _errors = {
        "invalid_request": "The request is otherwise malformed",
        "invalid_client": "Client authentication failed (e.g., unknown client, no client "
        "authentication included, or unsupported authentication method)",
        "invalid_grant": "The provided authorization grant or refresh token is invalid, expired, revoked, does not"
        " match the redirection URI used in the authorization request, or "
        "was issued to another client",
        "unauthorized_client": "The authenticated client is not authorized to use this authorization grant type",
        "unsupported_grant_type": "The authorization grant type is not supported by the authorization server",
        "invalid_scope": "The requested scope is invalid, unknown, malformed, or exceeds the scope "
        "granted by the resource owner",
    }

    def __init__(self, error):
        # Unknown error codes leave description as None.
        self.error = error
        self.description = self._errors.get(error)

    def create_dict(self):
        """Return the RFC 6749 error payload for a token response."""
        return {"error": self.error, "error_description": self.description}
class BearerTokenError(Exception):
    """
    OAuth2 errors.
    https://tools.ietf.org/html/rfc6750#section-3.1
    """

    _errors = {
        "invalid_request": ("The request is otherwise malformed", 400),
        "invalid_token": (
            "The access token provided is expired, revoked, malformed, or invalid for other reasons",
            401,
        ),
        "insufficient_scope": ("The request requires higher privileges than provided by the access token", 403),
    }

    def __init__(self, code):
        # Unknown codes map to an empty description/status pair.
        self.code = code
        description, status = self._errors.get(code, ("", ""))
        self.description = description
        self.status = status
class UserAuthError(Exception):
    """
    Specific to the Resource Owner Password Credentials flow when
    the Resource Owners credentials are not valid.
    """

    error = "access_denied"
    description = "The resource owner or authorization server denied the request."

    def create_dict(self):
        """Return the OAuth2 error payload."""
        payload = {"error": self.error, "error_description": self.description}
        return payload
|
/sanic_openid_connect_provider-0.9.0-py3-none-any.whl/sanic_openid_connect_provider/exceptions.py
| 0.742608 | 0.223271 |
exceptions.py
|
pypi
|
import base64
import binascii
import datetime
import hashlib
import logging
import uuid
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
from sanic_openid_connect_provider.utils import masked
if TYPE_CHECKING:
from sanic_openid_connect_provider.models.clients import Client
logger = logging.getLogger("oicp")
class TokenStore(object):
    """Base class for access-token storage back-ends.

    Builds token and id_token payloads; persistence is left to subclasses
    (see ``InMemoryTokenStore``).
    """

    def __init__(self, provider=None):
        self._provider = provider

    def set_provider(self, provider):
        # Late-bound reference to the OIDC provider that owns this store.
        self._provider = provider

    async def setup(self):
        # Hook for back-ends that need async initialisation; no-op here.
        pass

    def create_token(
        self,
        user: Dict[str, Any],
        client: "Client",
        auth_time: int,
        scope: Tuple[str, ...],
        expire_delta: int,
        specific_claims: Dict[str, Any] = None,
        id_token: Dict[str, Any] = None,
        code: str = None,
    ) -> Dict[str, Any]:
        """Build (but do not persist) a new access-token record.

        ``at_hash`` is the base64url-encoded left half of the SHA-256 hash
        of the access token, with padding stripped.
        """
        access_token = uuid.uuid4().hex
        hashed_access_token = hashlib.sha256(access_token.encode("ascii")).hexdigest().encode("ascii")
        # Keep the left half of the digest, convert the hex back to raw
        # bytes, base64url-encode and drop the "=" padding.
        hashed_access_token = (
            base64.urlsafe_b64encode(binascii.unhexlify(hashed_access_token[: len(hashed_access_token) // 2]))
            .rstrip(b"=")
            .decode("ascii")
        )
        return {
            "user": user["username"],
            "auth_time": auth_time,
            "client": client.id,
            "access_token": access_token,
            "id_token": id_token,
            "refresh_token": uuid.uuid4().hex,
            # Expiry is an absolute local-clock timestamp.
            "expires_at": int(datetime.datetime.now().timestamp() + expire_delta),
            "scope": scope,
            "at_hash": hashed_access_token,
            "code": code,
            "specific_claims": specific_claims,
        }

    def create_id_token(
        self,
        user: Dict[str, Any],
        client: "Client",
        auth_time: int,
        expire_delta: int,
        issuer: str,
        nonce: Optional[str] = "",
        at_hash="",
        scope: List[str] = None,
        specific_claims: Dict[str, Any] = None,
    ):
        """Build the claims dict for an OIDC id_token."""
        if scope is None:
            scope = []
        if specific_claims is None:
            specific_claims = {}
        # Convert datetimes into timestamps.
        now = int(datetime.datetime.now().timestamp())
        iat_time = now
        exp_time = int(now + expire_delta)
        # auth_time = int(user['auth_time'])
        sub = user["username"]
        if client.type == "pairwise":
            # Pairwise subject identifier: do not expose the raw username.
            sub = hashlib.sha256(sub.encode()).hexdigest()
        dic = {
            "iss": issuer,
            "sub": sub,
            "aud": client.id,
            "exp": exp_time,
            "iat": iat_time,
            "auth_time": auth_time,
        }
        if nonce:
            dic["nonce"] = str(nonce)
        if at_hash:
            dic["at_hash"] = at_hash
        # Only the claim *names* requested under "id_token" are forwarded.
        specific_claims = specific_claims.get("id_token", {}).keys()
        claims = self._provider.users.get_claims_for_userdata_by_scope(user, scope, specific_claims)
        dic.update(claims)
        return dic

    async def save_token(self, token: Dict[str, Any]):
        raise NotImplementedError()

    async def delete_token_by_access_token(self, access_key: str):
        raise NotImplementedError()

    async def delete_token_by_code(self, code: str):
        raise NotImplementedError()

    async def get_token_by_refresh_token(self, refresh_token: str) -> Dict[str, Any]:
        raise NotImplementedError()

    async def get_token_by_access_token(self, access_token: str) -> Union[Dict[str, Any], None]:
        raise NotImplementedError()

    async def all(self) -> AsyncGenerator[Dict[str, Any], None]:
        # The unreachable yield makes this an async *generator* (empty by
        # default) so subclass overrides share the declared return type.
        if False:  # For typing
            yield {}
class InMemoryTokenStore(TokenStore):
    """Token store backed by a process-local dict (no persistence)."""

    def __init__(self, *args, **kwargs):
        super(InMemoryTokenStore, self).__init__(*args, **kwargs)
        self._store = {}
        self._client_token_store = {}

    async def save_token(self, token: Dict[str, Any]):
        self._store[token["access_token"]] = token
        logger.info("Saved token {0}".format(masked(token["access_token"])))

    async def delete_token_by_access_token(self, access_token: str):
        # Silently ignore unknown tokens; log only real deletions.
        if self._store.pop(access_token, None) is not None:
            logger.info("Deleted token {0}".format(masked(access_token)))

    async def delete_token_by_code(self, code: str):
        stale = [key for key, token in self._store.items() if token.get("code") == code]
        for key in stale:
            del self._store[key]
            logger.info("Deleted token {0}".format(masked(key)))

    async def get_token_by_refresh_token(self, refresh_token: str) -> Union[Dict[str, Any], None]:
        return next(
            (token for token in self._store.values() if token.get("refresh_token") == refresh_token),
            None,
        )

    async def get_token_by_access_token(self, access_token: str) -> Union[Dict[str, Any], None]:
        return self._store.get(access_token)

    async def all(self) -> AsyncGenerator[Dict[str, Any], None]:
        for token in self._store.values():
            yield token
|
/sanic_openid_connect_provider-0.9.0-py3-none-any.whl/sanic_openid_connect_provider/models/token.py
| 0.772273 | 0.199776 |
token.py
|
pypi
|
import datetime
import logging
import uuid
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, Tuple, Union
from sanic_openid_connect_provider.utils import masked
if TYPE_CHECKING:
from sanic_openid_connect_provider.models.clients import Client
logger = logging.getLogger("oicp")
class CodeStore(object):
    """Base class for authorization-code storage back-ends."""

    def __init__(self, provider=None):
        self._provider = provider

    def set_provider(self, provider):
        # Late-bound reference to the OIDC provider that owns this store.
        self._provider = provider

    async def setup(self):
        # Hook for back-ends that need async initialisation; no-op here.
        pass

    async def create_code(
        self,
        client: "Client",
        user: Dict[str, Any],
        scopes: Tuple[str, ...],
        code_expire: int,
        nonce: str = None,
        code_challenge: str = None,
        code_challenge_method: str = None,
        specific_claims: Dict[str, Any] = None,
    ):
        """Create, persist (via ``_save_code``) and return a new code record."""
        if specific_claims is None:
            specific_claims = {}
        code = {
            "used": False,
            "user": user["username"],
            "client": client.id,
            "code": uuid.uuid4().hex,
            "code_challenge": code_challenge,
            "code_challenge_method": code_challenge_method,
            # Absolute local-clock expiry timestamp.
            "expires_at": int(datetime.datetime.now().timestamp() + code_expire),
            "scope": scopes,
            "nonce": nonce,
            "specific_claims": specific_claims,
            "auth_time": user["auth_time"],
        }
        await self._save_code(code)
        return code

    async def _save_code(self, code: Dict[str, Any]):
        raise NotImplementedError()

    async def get_by_id(self, id_: str) -> Union[Dict[str, Any], None]:
        raise NotImplementedError()

    async def mark_used_by_id(self, id_: str):
        raise NotImplementedError()

    async def all(self) -> AsyncGenerator[Dict[str, Any], None]:
        # Unreachable yield makes this an (empty) async generator for typing.
        if False:  # For typing
            yield {}
class InMemoryCodeStore(CodeStore):
    """Authorization-code store kept in a process-local dict (no persistence)."""

    def __init__(self, *args, **kwargs):
        super(InMemoryCodeStore, self).__init__(*args, **kwargs)
        self._store: Dict[str, Any] = {}

    async def _save_code(self, code: Dict[str, Any]):
        self._store[code["code"]] = code
        logger.info("Saved code {0}".format(masked(code["code"])))

    async def get_by_id(self, id_: str) -> Union[Dict[str, Any], None]:
        entry = self._store.get(id_)
        if entry is None:
            return None
        # Expired codes are purged lazily on lookup.
        if int(datetime.datetime.now().timestamp()) > entry["expires_at"]:
            del self._store[id_]
            logger.info("Code expired, removing")
            return None
        return entry

    async def mark_used_by_id(self, id_: str):
        entry = self._store.get(id_)
        if entry is not None:
            entry["used"] = True
            logger.info("Marked code {0} as used".format(masked(entry["code"])))

    async def all(self) -> AsyncGenerator[Dict[str, Any], None]:
        for entry in self._store.values():
            yield entry
|
/sanic_openid_connect_provider-0.9.0-py3-none-any.whl/sanic_openid_connect_provider/models/code.py
| 0.773473 | 0.182426 |
code.py
|
pypi
|
import datetime
import logging
from typing import Any, Dict, KeysView, List, Union
import sanic.request
# Static demonstration user served by the default UserManager.  Only
# "username" and "consent" are required by the provider; the remaining
# keys use the OIDC standard-claim names.
TEST_USER = {
    "username": "testuser",  # Required
    "consent": False,  # Required
    "auth_time": datetime.datetime.now().timestamp(),
    "name": "John Wick",
    "given_name": "john",
    "family_name": "wick",
    "gender": "male",
    "locale": "en-us",
    "email": "[email protected]",
    "email_verified": True,
    "address": {
        "formatted": "500 On Fire Hose, USA",
        "street_address": "500 On Fire Hose",
        "locality": "New York",
        "region": "No clue",
        "postal_code": "NY12354",
        "country": "United States of America",
    },
    "phone_number": "07428555555",
    "phone_number_verified": True,
}
# Maps OIDC scope name -> list of claim names that scope unlocks
# (mirrors the scope/claim table of OpenID Connect Core, section 5.4).
SCOPES = {
    "profile": [
        "name",
        "family_name",
        "given_name",
        "middle_name",
        "nickname",
        "preferred_username",
        "profile",
        "picture",
        "website",
        "gender",
        "birthdate",
        "zoneinfo",
        "locale",
        "updated_at",
    ],
    "email": ["email", "email_verified"],
    "address": ["address"],
    "phone": ["phone_number", "phone_number_verified"],
}
# Module-wide logger shared by the provider components.
logger = logging.getLogger("oicp")
class UserManager(object):
    """Resolves users and converts raw user records into OIDC claims.

    The default implementation serves the static ``TEST_USER`` record;
    subclass and override :meth:`get_user_by_username` (and optionally
    :meth:`get_user` / :meth:`is_authenticated`) to use a real user store.
    """

    def __init__(self, provider=None):
        self._provider = provider

    def set_provider(self, provider):
        """Attach the owning OIDC provider after construction."""
        self._provider = provider

    async def setup(self):
        """Async initialisation hook; no-op by default."""
        pass

    async def is_authenticated(self, request: sanic.request.Request) -> bool:
        """Return True when the session holds a user whose authentication is
        still fresh enough for the request's optional ``max_age`` parameter.

        A session older than ``max_age`` seconds is cleared so the user must
        re-authenticate.
        """
        user_sess = request.ctx.session.get("user")
        if user_sess:
            max_age = int(
                request.args.get("max_age", "0") if request.method == "GET" else request.form.get("max_age", "0")
            )
            # If they haven't provided max_age, the session is accepted as-is.
            if max_age > 0:
                now = datetime.datetime.now().timestamp()
                # If the time since they authenticated exceeds max_age, clear the session.
                if (now - user_sess["auth_time"]) > max_age:
                    request.ctx.session.clear()
                    return False
                else:
                    return True
            else:
                return True
        return False

    async def get_user(self, request: sanic.request.Request) -> Dict[str, Any]:
        """Return standardised claims for the user stored in the session."""
        session_user = request.ctx.session["user"]
        return self.user_data_to_claims(session_user)

    async def get_user_by_username(self, username: str) -> Dict[str, Any]:
        """Look a user up by username.

        Get this by other means in real deployments; the default serves the
        static test user regardless of *username*.
        """
        return self.user_data_to_claims(TEST_USER)

    @classmethod
    def clean_list(cls, dirty_list) -> List[Any]:
        """Recursively remove falsy items (and emptied sub-containers) from a list."""
        result = []
        for item in dirty_list:
            if isinstance(item, dict):
                item = cls.clean_dict(item)
            elif isinstance(item, (list, tuple, set)):
                item = cls.clean_list(item)
            if item:
                result.append(item)
        return result

    @classmethod
    def clean_dict(cls, dirty_dict: Dict[str, Any]) -> Dict[str, Any]:
        """
        Strips out empty values (empty strings and None), recursing into
        nested dicts and lists.
        """
        result = {}
        for key, value in dirty_dict.items():
            if isinstance(value, str) and value == "":
                continue
            elif value is None:
                continue
            elif isinstance(value, dict):
                result[key] = cls.clean_dict(value)
            elif isinstance(value, (list, tuple, set)):
                result[key] = cls.clean_list(value)
            else:
                # Falsy scalars such as False/0 are deliberately kept here.
                result[key] = value
        return result

    @staticmethod
    def user_data_to_claims(user_data: Any) -> Dict[str, Any]:
        """
        Converts random format user_data is in, to a standardised format.
        Add any specific pieces of data here.
        """
        # Derive given/family name from the first space in the full name.
        first_name = user_data["name"].split(" ", 1)[0]
        last_name = user_data["name"].split(" ", 1)[-1]
        return {
            "username": user_data["username"],
            "consent": user_data["consent"],
            "auth_time": user_data["auth_time"],
            "name": user_data["name"],
            "given_name": first_name,
            "family_name": last_name,
            "gender": user_data["gender"],
            "locale": user_data["locale"],
            "email": user_data["email"],
            "email_verified": user_data["email_verified"],
            "address": {
                "formatted": user_data["address"]["formatted"],
                "street_address": user_data["address"]["street_address"],
                "locality": user_data["address"]["locality"],
                "region": user_data["address"]["region"],
                "postal_code": user_data["address"]["postal_code"],
                "country": user_data["address"]["country"],
            },
            "phone_number": user_data["phone_number"],
            "phone_number_verified": user_data["phone_number_verified"],
            "nickname": user_data.get("nickname"),
            "profile": user_data.get("profile"),
            "picture": user_data.get("picture"),
            "website": user_data.get("website"),
            "birthdate": user_data.get("birthdate"),
            "zoneinfo": user_data.get("zoneinfo"),
            "updated_at": user_data.get("updated_at"),
        }

    @staticmethod
    def add_scopes(scopes: Dict[str, Any]):
        """Merge extra scope -> claim-name mappings into the global SCOPES."""
        for scope, claims in scopes.items():
            if not isinstance(claims, (list, tuple, set)):
                # Fixed: use the module-level "oicp" logger like the rest of
                # this module instead of the root logger.
                logger.error("Claims {0} is not a list".format(type(claims)))
                continue
            SCOPES[scope] = claims
            logger.info("Added scope {0}: {1}".format(scope, claims))

    async def get_claims_for_user_by_scope(
        self, username: str, scopes: List[str], specific_claims: Union[List[str], KeysView]
    ) -> Dict[str, Any]:
        """Fetch the user, then filter their claims by scope / claim request."""
        user_data = await self.get_user_by_username(username)
        return self.get_claims_for_userdata_by_scope(user_data, scopes, specific_claims)

    def get_claims_for_userdata_by_scope(
        self, user_data: Dict[str, Any], scopes: List[str], specific_claims: List[str]
    ) -> Dict[str, Any]:
        """Select the subset of *user_data* allowed by *scopes* plus any
        individually requested *specific_claims*, with empty values removed."""
        result = {}
        # Get all claims for the scope
        for scope in scopes:
            if scope == "openid":
                # "openid" is the base scope and carries no claims itself.
                continue
            elif scope not in SCOPES:
                logger.warning("Requested unknown scope {0}".format(scope))
                continue
            for claim in SCOPES[scope]:
                try:
                    result[claim] = user_data[claim]
                except KeyError:
                    pass
        # Get some specific claims if they ask for them
        for claim in specific_claims:
            try:
                result[claim] = user_data[claim]
            except KeyError:
                pass
        return self.clean_dict(result)
|
/sanic_openid_connect_provider-0.9.0-py3-none-any.whl/sanic_openid_connect_provider/models/users.py
| 0.630344 | 0.2748 |
users.py
|
pypi
|
from sanic.exceptions import InvalidUsage
from sanic.constants import HTTP_METHODS
class HTTPMethodView:
    """Class-based view that dispatches requests by HTTP verb.

    Subclass and implement a method named after each HTTP method you want
    to support (``get``, ``post``, ``put``, ``patch``, ``delete``), e.g.::

        class DummyView(HTTPMethodView):
            def get(self, request, *args, **kwargs):
                return text('I am get method')

    Register with either ``app.add_route(DummyView.as_view(), '/')`` or
    ``app.route('/')(DummyView.as_view())``.  URL parameters are passed
    straight through to the handler.  Non-implemented methods produce a
    405 response.  Decorators listed in ``decorators`` are applied
    (in order) to the generated view function.
    """

    decorators = []

    def dispatch_request(self, request, *args, **kwargs):
        # Resolve the handler from the request verb, e.g. 'GET' -> self.get.
        handler = getattr(self, request.method.lower(), None)
        return handler(request, *args, **kwargs)

    @classmethod
    def as_view(cls, *class_args, **class_kwargs):
        """Return view function for use with the routing system, that
        dispatches request to appropriate handler method.
        """

        def view(*args, **kwargs):
            instance = view.view_class(*class_args, **class_kwargs)
            return instance.dispatch_request(*args, **kwargs)

        if cls.decorators:
            view.__module__ = cls.__module__
            for decorator in cls.decorators:
                view = decorator(view)

        # Mirror the class identity onto the generated function so routing
        # and introspection see meaningful metadata.
        view.view_class = cls
        view.__doc__ = cls.__doc__
        view.__module__ = cls.__module__
        view.__name__ = cls.__name__
        return view
class CompositionView:
    """Method-to-handler mapping view.

    Register one handler per HTTP method via :meth:`add`::

        view = CompositionView()
        view.add(['GET'], lambda request: text('I am get method'))
        view.add(['POST', 'PUT'], lambda request: text('I am post/put method'))

    Calling the view dispatches to the handler registered for the
    request's method; non-implemented methods produce a 405 response.
    """

    def __init__(self):
        # Maps upper-case HTTP method name -> handler callable.
        self.handlers = {}

    def add(self, methods, handler):
        """Register *handler* for every method in *methods*.

        Raises InvalidUsage for an unknown method or a duplicate
        registration.
        """
        for method in methods:
            if method not in HTTP_METHODS:
                raise InvalidUsage(
                    '{} is not a valid HTTP method.'.format(method))
            if method in self.handlers:
                raise InvalidUsage(
                    'Method {} is already registered.'.format(method))
            self.handlers[method] = handler

    def __call__(self, request, *args, **kwargs):
        return self.handlers[request.method.upper()](request, *args, **kwargs)
|
/sanic-patched-0.4.1.tar.gz/sanic-patched-0.4.1/sanic/views.py
| 0.824709 | 0.165088 |
views.py
|
pypi
|
from cgi import parse_header
from collections import namedtuple
from http.cookies import SimpleCookie
from httptools import parse_url
from urllib.parse import parse_qs, urlunparse
try:
from ujson import loads as json_loads
except ImportError:
from json import loads as json_loads
from sanic.exceptions import InvalidUsage
from sanic.log import log
# Fallback Content-Type applied when a request supplies none.
DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
# > If the media type remains unknown, the recipient SHOULD treat it
# > as type "application/octet-stream"
class RequestParameters(dict):
    """Dict whose values are lists: ``get`` returns the first value and
    ``getlist`` returns the whole shebang.

    Used for query args, form fields and uploaded files, where a key may
    legitimately appear multiple times.
    """

    def get(self, name, default=None):
        """Return the first value for *name*, or *default* when the key is
        missing or maps to an empty list."""
        values = super().get(name)
        # Guard the empty-list case, which previously raised IndexError.
        if not values:
            return default
        return values[0]

    def getlist(self, name, default=None):
        """Return the entire list of values (or *default* when missing)."""
        return super().get(name, default)
class Request(dict):
    """Properties of an HTTP request such as URL, headers, etc.

    Parsed representations (json, form, files, args, cookies) are computed
    lazily on first access and cached on the instance.
    """
    __slots__ = (
        'app', 'headers', 'version', 'method', '_cookies', 'transport',
        'body', 'parsed_json', 'parsed_args', 'parsed_form', 'parsed_files',
        '_ip', '_parsed_url',
    )

    def __init__(self, url_bytes, headers, version, method, transport):
        # TODO: Content-Encoding detection
        self._parsed_url = parse_url(url_bytes)
        self.app = None
        self.headers = headers
        self.version = version
        self.method = method
        self.transport = transport
        # Init but do not inhale
        # NOTE(review): body starts as a list (presumably chunks appended by
        # the protocol) yet the parsers below treat it as bytes — confirm the
        # protocol replaces it with bytes before any parsing happens.
        self.body = []
        self.parsed_json = None
        self.parsed_form = None
        self.parsed_files = None
        self.parsed_args = None
        self._cookies = None

    @property
    def json(self):
        """Lazily parsed JSON body; None for an empty body, InvalidUsage
        when parsing fails on a non-empty body."""
        if self.parsed_json is None:
            try:
                self.parsed_json = json_loads(self.body)
            except Exception:
                if not self.body:
                    return None
                raise InvalidUsage("Failed when parsing body as json")
        return self.parsed_json

    @property
    def token(self):
        """Attempt to return the auth header token.
        :return: token related to request
        """
        # Assumes a "<scheme> <token>" shaped header (e.g. "Bearer abc").
        auth_header = self.headers.get('Authorization')
        if auth_header is not None:
            return auth_header.split()[1]
        return auth_header

    @property
    def form(self):
        """Lazily parsed form body (urlencoded or multipart); parse errors
        are logged and yield empty parameters."""
        if self.parsed_form is None:
            self.parsed_form = RequestParameters()
            self.parsed_files = RequestParameters()
            content_type = self.headers.get(
                'Content-Type', DEFAULT_HTTP_CONTENT_TYPE)
            content_type, parameters = parse_header(content_type)
            try:
                if content_type == 'application/x-www-form-urlencoded':
                    self.parsed_form = RequestParameters(
                        parse_qs(self.body.decode('utf-8')))
                elif content_type == 'multipart/form-data':
                    # TODO: Stream this instead of reading to/from memory
                    boundary = parameters['boundary'].encode('utf-8')
                    self.parsed_form, self.parsed_files = (
                        parse_multipart_form(self.body, boundary))
            except Exception:
                log.exception("Failed when parsing form")
        return self.parsed_form

    @property
    def files(self):
        """Uploaded files; populated as a side effect of parsing the form."""
        if self.parsed_files is None:
            self.form  # compute form to get files
        return self.parsed_files

    @property
    def args(self):
        """Lazily parsed query-string parameters."""
        if self.parsed_args is None:
            if self.query_string:
                self.parsed_args = RequestParameters(
                    parse_qs(self.query_string))
            else:
                self.parsed_args = RequestParameters()
        return self.parsed_args

    @property
    def cookies(self):
        """Request cookies as a plain name -> value dict."""
        if self._cookies is None:
            cookie = self.headers.get('Cookie') or self.headers.get('cookie')
            if cookie is not None:
                cookies = SimpleCookie()
                cookies.load(cookie)
                self._cookies = {name: cookie.value
                                 for name, cookie in cookies.items()}
            else:
                self._cookies = {}
        return self._cookies

    @property
    def ip(self):
        """Peer address of the underlying transport (cached)."""
        if not hasattr(self, '_ip'):
            self._ip = self.transport.get_extra_info('peername')
        return self._ip

    @property
    def scheme(self):
        """'http'/'https' or 'ws'/'wss' depending on upgrade header and TLS."""
        if self.app.websocket_enabled \
                and self.headers.get('upgrade') == 'websocket':
            scheme = 'ws'
        else:
            scheme = 'http'

        if self.transport.get_extra_info('sslcontext'):
            scheme += 's'

        return scheme

    @property
    def host(self):
        # it appears that httptools doesn't return the host
        # so pull it from the headers
        return self.headers.get('Host', '')

    @property
    def path(self):
        return self._parsed_url.path.decode('utf-8')

    @property
    def query_string(self):
        return self._parsed_url.query.decode('utf-8') if self._parsed_url.query else ''

    @property
    def url(self):
        """Reassembled URL from scheme, host, path and query string."""
        return urlunparse((
            self.scheme,
            self.host,
            self.path,
            None,
            self.query_string,
            None))
# Lightweight record describing one uploaded file part.
File = namedtuple('File', ['type', 'body', 'name'])


def parse_multipart_form(body, boundary):
    """Parse a request body and returns fields and files
    :param body: bytes request body
    :param boundary: bytes multipart boundary
    :return: fields (RequestParameters), files (RequestParameters)
    """
    files = RequestParameters()
    fields = RequestParameters()

    form_parts = body.split(boundary)
    # Skip the preamble (index 0) and the trailing epilogue part.
    for form_part in form_parts[1:-1]:
        file_name = None
        file_type = None
        field_name = None
        line_index = 2  # skip the '\r\n' immediately after the boundary
        line_end_index = 0
        # Read the part's header lines until the blank line that separates
        # headers from the payload.
        while not line_end_index == -1:
            line_end_index = form_part.find(b'\r\n', line_index)
            form_line = form_part[line_index:line_end_index].decode('utf-8')
            line_index = line_end_index + 2

            if not form_line:
                break

            colon_index = form_line.index(':')
            form_header_field = form_line[0:colon_index]
            # "+ 2" skips the ': ' after the header name.
            form_header_value, form_parameters = parse_header(
                form_line[colon_index + 2:])

            if form_header_field == 'Content-Disposition':
                if 'filename' in form_parameters:
                    file_name = form_parameters['filename']
                field_name = form_parameters.get('name')
            elif form_header_field == 'Content-Type':
                file_type = form_header_value

        # Payload runs to just before the part's trailing '\r\n--'.
        post_data = form_part[line_index:-4]
        # A filename or explicit content type marks the part as a file
        # upload; otherwise it's a plain text field.
        if file_name or file_type:
            file = File(type=file_type, name=file_name, body=post_data)
            if field_name in files:
                files[field_name].append(file)
            else:
                files[field_name] = [file]
        else:
            value = post_data.decode('utf-8')
            if field_name in fields:
                fields[field_name].append(value)
            else:
                fields[field_name] = [value]

    return fields, files
|
/sanic-patched-0.4.1.tar.gz/sanic-patched-0.4.1/sanic/request.py
| 0.579876 | 0.168412 |
request.py
|
pypi
|
class HierDict(object):
"""
This is the specialised dictionary that is used by the Sanic Plugin Toolkit
to manage Context objects. It can be hierarchical, and it searches its
parents if it cannot find an item in its own dictionary. It can create its
own children.
"""
__slots__ = ('_parent_hd', '_dict', '__weakref__')
@classmethod
def _iter_slots(cls):
use_cls = cls
bases = cls.__bases__
base_count = 0
while True:
if use_cls.__slots__:
for _s in use_cls.__slots__:
yield _s
if base_count >= len(bases):
break
use_cls = bases[base_count]
base_count += 1
return
def _inner(self):
"""
:return: the internal dictionary
:rtype: dict
"""
return object.__getattribute__(self, '_dict')
def __repr__(self):
_dict_repr = repr(self._inner())
return "HierDict({:s})".format(_dict_repr)
def __str__(self):
_dict_str = str(self._inner())
return "HierDict({:s})".format(_dict_str)
def __len__(self):
return len(self._inner())
def __setitem__(self, key, value):
# TODO: If key is in __slots__, ignore it and return
return self._inner().__setitem__(key, value)
def __getitem__(self, item):
try:
return self._inner().__getitem__(item)
except KeyError as e1:
parents_searched = [self]
parent = self._parent_hd
while parent:
try:
return parent._inner().__getitem__(item)
except KeyError:
parents_searched.append(parent)
# noinspection PyProtectedMember
next_parent = parent._parent_hd
if next_parent in parents_searched:
raise RuntimeError("Recursive HierDict found!")
parent = next_parent
raise e1
def __delitem__(self, key):
self._inner().__delitem__(key)
def __getattr__(self, item):
if item in self._iter_slots():
return object.__getattribute__(self, item)
try:
return self.__getitem__(item)
except KeyError as e:
raise AttributeError(*e.args)
def __setattr__(self, key, value):
if key in self._iter_slots():
if key == '__weakref__':
if value is None:
return
else:
raise ValueError("Cannot set weakrefs on Context")
return object.__setattr__(self, key, value)
try:
return self.__setitem__(key, value)
except Exception as e: # pragma: no cover
# what exceptions can occur on setting an item?
raise e
def __contains__(self, item):
return self._inner().__contains__(item)
def get(self, key, default=None):
try:
return self.__getattr__(key)
except (AttributeError, KeyError):
return default
def set(self, key, value):
try:
return self.__setattr__(key, value)
except Exception as e: # pragma: no cover
raise e
def items(self):
"""
A set-like read-only view HierDict's (K,V) tuples
:return:
:rtype: frozenset
"""
return self._inner().items()
def keys(self):
"""
An object containing a view on the HierDict's keys
:return:
:rtype: tuple # using tuple to represent an immutable list
"""
return self._inner().keys()
def values(self):
"""
An object containing a view on the HierDict's values
:return:
:rtype: tuple # using tuple to represent an immutable list
"""
return self._inner().values()
def replace(self, key, value):
"""
If this HierDict doesn't already have this key, it sets
the value on a parent HierDict if that parent has the key,
otherwise sets the value on this HierDict.
:param key:
:param value:
:return: Nothing
:rtype: None
"""
if key in self._inner().keys():
return self.__setitem__(key, value)
parents_searched = [self]
parent = self._parent_hd
while parent:
try:
if key in parent.keys():
return parent.__setitem__(key, value)
except (KeyError, AttributeError):
pass
parents_searched.append(parent)
# noinspection PyProtectedMember
next_parent = parent._parent_context
if next_parent in parents_searched:
raise RuntimeError("Recursive HierDict found!")
parent = next_parent
return self.__setitem__(key, value)
# noinspection PyPep8Naming
def update(self, E=None, **F):
"""
Update HierDict from dict/iterable E and F
:return: Nothing
:rtype: None
"""
if E is not None:
if hasattr(E, 'keys'):
for K in E:
self.replace(K, E[K])
elif hasattr(E, 'items'):
for K, V in E.items():
self.replace(K, V)
else:
for K, V in E:
self.replace(K, V)
for K in F:
self.replace(K, F[K])
def __new__(cls, parent, *args, **kwargs):
self = super(HierDict, cls).__new__(cls)
self._dict = dict(*args, **kwargs)
if parent is not None:
assert isinstance(parent, HierDict), "Parent context must be a valid initialised HierDict"
self._parent_hd = parent
else:
self._parent_hd = None
return self
def __init__(self, *args, **kwargs):
args = list(args)
args.pop(0) # remove parent
super(HierDict, self).__init__()
def __getstate__(self):
state_dict = {}
for s in HierDict.__slots__:
if s == "__weakref__":
continue
state_dict[s] = object.__getattribute__(self, s)
return state_dict
def __setstate__(self, state):
for s, v in state.items():
setattr(self, s, v)
def __reduce__(self):
state_dict = self.__getstate__()
_ = state_dict.pop('_stk_realm', None)
parent_context = state_dict.pop('_parent_hd')
return (HierDict.__new__, (self.__class__, parent_context), state_dict)
class SanicContext(HierDict):
    """HierDict specialisation that also carries a reference to the owning
    sanic-plugin-toolkit realm, propagated to child contexts."""

    __slots__ = ('_stk_realm',)

    def __repr__(self):
        return "SanicContext({:s})".format(repr(self._inner()))

    def __str__(self):
        return "SanicContext({:s})".format(str(self._inner()))

    def create_child_context(self, *args, **kwargs):
        """Spawn a child context that shares this context's realm."""
        return SanicContext(self._stk_realm, self, *args, **kwargs)

    def __new__(cls, stk_realm, parent, *args, **kwargs):
        if parent is not None:
            assert isinstance(parent, SanicContext), "Parent context must be a valid initialised SanicContext"
        self = super(SanicContext, cls).__new__(cls, parent, *args, **kwargs)
        self._stk_realm = stk_realm
        return self

    def __init__(self, *args, **kwargs):
        arg_list = list(args)
        # The realm was already consumed by __new__; discard it here.
        _stk_realm = arg_list.pop(0)  # noqa: F841
        super(SanicContext, self).__init__(*arg_list)

    def __getstate__(self):
        state = super(SanicContext, self).__getstate__()
        for slot in SanicContext.__slots__:
            state[slot] = object.__getattribute__(self, slot)
        return state

    def __reduce__(self):
        state = self.__getstate__()
        realm = state.pop('_stk_realm')
        parent_context = state.pop('_parent_hd')
        return (SanicContext.__new__, (self.__class__, realm, parent_context), state)

    def for_request(self, req):
        # shortcut for context.request[id(req)]
        requests_ctx = self.request
        return requests_ctx[id(req)] if req else None
|
/sanic_plugin_toolkit-1.2.1-py3-none-any.whl/sanic_plugin_toolkit/context.py
| 0.535827 | 0.238074 |
context.py
|
pypi
|
from collections import namedtuple
from sanic_plugin_toolkit import SanicPlugin
from sanic_plugin_toolkit.plugin import SANIC_21_6_0, SANIC_21_9_0, SANIC_VERSION, FutureMiddleware, FutureRoute
# (plugin, registration) pair backing the per-registration decorator API.
ContextualizeAssociatedTuple = namedtuple('ContextualizeAssociatedTuple', ['plugin', 'reg'])


class ContextualizeAssociated(ContextualizeAssociatedTuple):
    """Association object handed out when the Contextualize plugin is
    registered; its decorators force ``with_context=True`` so handlers
    receive the plugin context."""

    __slots__ = ()

    # Decorator
    def middleware(self, *args, **kwargs):
        """Decorate and register middleware
        :param args: captures all of the positional arguments passed in
        :type args: tuple(Any)
        :param kwargs: captures the keyword arguments passed in
        :type kwargs: dict(Any)
        :return: The middleware function to use as the decorator
        :rtype: fn
        """
        kwargs.setdefault('priority', 5)
        kwargs.setdefault('relative', None)
        kwargs.setdefault('attach_to', None)
        kwargs['with_context'] = True  # This is the whole point of this plugin
        plugin = self.plugin
        reg = self.reg

        # Bare usage (@ctx.middleware directly on a function): register now.
        if len(args) == 1 and callable(args[0]):
            middle_f = args[0]
            return plugin._add_new_middleware(reg, middle_f, **kwargs)

        # Parameterised usage: return the actual decorator.
        def wrapper(middle_f):
            nonlocal plugin, reg
            nonlocal args, kwargs
            return plugin._add_new_middleware(reg, middle_f, *args, **kwargs)
        return wrapper

    def route(self, uri, *args, **kwargs):
        """Create a plugin route from a decorated function.
        :param uri: endpoint at which the route will be accessible.
        :type uri: str
        :param args: captures all of the positional arguments passed in
        :type args: tuple(Any)
        :param kwargs: captures the keyword arguments passed in
        :type kwargs: dict(Any)
        :return: The exception function to use as the decorator
        :rtype: fn
        """
        if len(args) == 0 and callable(uri):
            raise RuntimeError("Cannot use the @route decorator without " "arguments.")
        kwargs.setdefault('methods', frozenset({'GET'}))
        kwargs.setdefault('host', None)
        kwargs.setdefault('strict_slashes', False)
        kwargs.setdefault('stream', False)
        kwargs.setdefault('name', None)
        kwargs.setdefault('version', None)
        kwargs.setdefault('ignore_body', False)
        kwargs.setdefault('websocket', False)
        kwargs.setdefault('subprotocols', None)
        kwargs.setdefault('unquote', False)
        kwargs.setdefault('static', False)
        # Newer sanic versions grew extra route options; default them only
        # when the installed version understands them.
        if SANIC_21_6_0 <= SANIC_VERSION:
            kwargs.setdefault('version_prefix', '/v')
        if SANIC_21_9_0 <= SANIC_VERSION:
            kwargs.setdefault('error_format', None)
        kwargs['with_context'] = True  # This is the whole point of this plugin
        plugin = self.plugin
        reg = self.reg

        def wrapper(handler_f):
            nonlocal plugin, reg
            nonlocal uri, args, kwargs
            return plugin._add_new_route(reg, uri, handler_f, *args, **kwargs)
        return wrapper

    def listener(self, event, *args, **kwargs):
        """Create a listener from a decorated function.
        :param event: Event to listen to.
        :type event: str
        :param args: captures all of the positional arguments passed in
        :type args: tuple(Any)
        :param kwargs: captures the keyword arguments passed in
        :type kwargs: dict(Any)
        :return: The function to use as the listener
        :rtype: fn
        """
        if len(args) == 1 and callable(args[0]):
            raise RuntimeError("Cannot use the @listener decorator without " "arguments")
        kwargs['with_context'] = True  # This is the whole point of this plugin
        plugin = self.plugin
        reg = self.reg

        def wrapper(listener_f):
            nonlocal plugin, reg
            nonlocal event, args, kwargs
            return plugin._add_new_listener(reg, event, listener_f, *args, **kwargs)
        return wrapper

    def websocket(self, uri, *args, **kwargs):
        """Create a websocket route from a decorated function
        # Deprecated. Use @contextualize.route("/path", websocket=True)
        """
        kwargs["websocket"] = True
        kwargs["with_context"] = True  # This is the whole point of this plugin
        return self.route(uri, *args, **kwargs)
class Contextualize(SanicPlugin):
    """Plugin whose decorators register middleware/routes/listeners with
    ``with_context=True``, so the handlers receive the shared context."""

    __slots__ = ()

    # Association type returned to users at registration time.
    AssociatedTuple = ContextualizeAssociated

    def _add_new_middleware(self, reg, middle_f, *args, **kwargs):
        # A user should never call this directly.
        # it should be called only by the AssociatedTuple
        assert reg in self.registrations
        (realm, p_name, url_prefix) = reg
        context = self.get_context_from_realm(reg)
        # This is how we add a new middleware _after_ the plugin is registered
        m = FutureMiddleware(middle_f, args, kwargs)
        realm._register_middleware_helper(m, realm, self, context)
        return middle_f

    def _add_new_route(self, reg, uri, handler_f, *args, **kwargs):
        # A user should never call this directly.
        # it should be called only by the AssociatedTuple
        assert reg in self.registrations
        (realm, p_name, url_prefix) = reg
        context = self.get_context_from_realm(reg)
        # This is how we add a new route _after_ the plugin is registered
        r = FutureRoute(handler_f, uri, args, kwargs)
        realm._register_route_helper(r, realm, self, context, p_name, url_prefix)
        return handler_f

    def _add_new_listener(self, reg, event, listener_f, *args, **kwargs):
        # A user should never call this directly.
        # it should be called only by the AssociatedTuple
        assert reg in self.registrations
        (realm, p_name, url_prefix) = reg
        context = self.get_context_from_realm(reg)
        # This is how we add a new listener _after_ the plugin is registered
        realm._plugin_register_listener(event, listener_f, self, context, *args, **kwargs)
        return listener_f

    # Decorator
    def middleware(self, *args, **kwargs):
        """Decorate and register middleware
        :param args: captures all of the positional arguments passed in
        :type args: tuple(Any)
        :param kwargs: captures the keyword arguments passed in
        :type kwargs: dict(Any)
        :return: The middleware function to use as the decorator
        :rtype: fn
        """
        kwargs.setdefault('priority', 5)
        kwargs.setdefault('relative', None)
        kwargs.setdefault('attach_to', None)
        kwargs['with_context'] = True  # This is the whole point of this plugin
        # Bare usage: the decorated function arrives as the sole argument.
        if len(args) == 1 and callable(args[0]):
            middle_f = args[0]
            return super(Contextualize, self).middleware(middle_f, **kwargs)

        def wrapper(middle_f):
            nonlocal self, args, kwargs
            return super(Contextualize, self).middleware(*args, **kwargs)(middle_f)
        return wrapper

    # Decorator
    def route(self, uri, *args, **kwargs):
        """Create a plugin route from a decorated function.
        :param uri: endpoint at which the route will be accessible.
        :type uri: str
        :param args: captures all of the positional arguments passed in
        :type args: tuple(Any)
        :param kwargs: captures the keyword arguments passed in
        :type kwargs: dict(Any)
        :return: The exception function to use as the decorator
        :rtype: fn
        """
        if len(args) == 0 and callable(uri):
            raise RuntimeError("Cannot use the @route decorator without arguments.")
        kwargs.setdefault('methods', frozenset({'GET'}))
        kwargs.setdefault('host', None)
        kwargs.setdefault('strict_slashes', False)
        kwargs.setdefault('stream', False)
        kwargs.setdefault('name', None)
        kwargs.setdefault('version', None)
        kwargs.setdefault('ignore_body', False)
        kwargs.setdefault('websocket', False)
        kwargs.setdefault('subprotocols', None)
        kwargs.setdefault('unquote', False)
        kwargs.setdefault('static', False)
        kwargs['with_context'] = True  # This is the whole point of this plugin

        def wrapper(handler_f):
            nonlocal self, uri, args, kwargs
            return super(Contextualize, self).route(uri, *args, **kwargs)(handler_f)
        return wrapper

    # Decorator
    def listener(self, event, *args, **kwargs):
        """Create a listener from a decorated function.
        :param event: Event to listen to.
        :type event: str
        :param args: captures all of the positional arguments passed in
        :type args: tuple(Any)
        :param kwargs: captures the keyword arguments passed in
        :type kwargs: dict(Any)
        :return: The exception function to use as the listener
        :rtype: fn
        """
        if len(args) == 1 and callable(args[0]):
            raise RuntimeError("Cannot use the @listener decorator without arguments")
        kwargs['with_context'] = True  # This is the whole point of this plugin

        def wrapper(listener_f):
            nonlocal self, event, args, kwargs
            return super(Contextualize, self).listener(event, *args, **kwargs)(listener_f)
        return wrapper

    def websocket(self, uri, *args, **kwargs):
        """Create a websocket route from a decorated function
        # Deprecated. Use @contextualize.route("/path",websocket=True)
        """
        kwargs["websocket"] = True
        kwargs["with_context"] = True  # This is the whole point of this plugin
        return self.route(uri, *args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(Contextualize, self).__init__(*args, **kwargs)


# Singleton instance used when registering this plugin with a realm.
instance = contextualize = Contextualize()
|
/sanic_plugin_toolkit-1.2.1-py3-none-any.whl/sanic_plugin_toolkit/plugins/contextualize.py
| 0.879237 | 0.222204 |
contextualize.py
|
pypi
|
from peewee import Model as PeeweeModel
from peewee import ModelBase
class Signal(object):
    """
    Simplest signals implementation for Peewee ORM: a plain list of
    receivers invoked synchronously, in registration order, by send().
    """

    __slots__ = 'receivers'

    def __init__(self):
        """
        Initialize the signal with no receivers.
        """
        self.receivers = []

    def connect(self, receiver):
        """
        Append receiver; it must be callable.
        """
        if not callable(receiver):
            raise ValueError('Invalid receiver: %s' % receiver)
        self.receivers.append(receiver)

    def __call__(self, receiver):
        """
        Support decorators: ``@signal`` connects and returns the function.
        """
        self.connect(receiver)
        return receiver

    def disconnect(self, receiver):
        """
        Remove receiver; unknown receivers raise ValueError.
        """
        if receiver not in self.receivers:
            raise ValueError('Unknown receiver: %s' % receiver)
        self.receivers.remove(receiver)

    def send(self, instance, *args, **kwargs):
        """
        Send signal: call every receiver with *instance* and any extras.
        """
        for receiver in self.receivers:
            receiver(instance, *args, **kwargs)
class BaseSignalModel(ModelBase):
    """
    Special metaclass that provides an opportunity to use pre/post signals
    with instances of a model.
    """
    # Registry of every concrete model class created via this metaclass.
    models = []

    def __new__(mcs, name, bases, attrs):
        cls = super(BaseSignalModel, mcs).__new__(mcs, name, bases, attrs)
        # Fresh Signal instances per class, so subclasses don't share receivers.
        cls.pre_save = Signal()
        cls.pre_delete = Signal()
        cls.post_delete = Signal()
        cls.post_save = Signal()

        # Skip the abstract base ('model' is peewee's default table name for
        # the bare Model class); register everything else.
        if cls._meta.table_name and cls._meta.table_name != 'model':
            mcs.models.append(cls)
        # Optional list of read-replica databases used by Model.select().
        cls._meta.read_slaves = getattr(cls._meta, 'read_slaves', None)
        return cls
class Model(PeeweeModel, metaclass=BaseSignalModel):
    """Peewee model with pre/post save/delete signals and optional
    round-robin routing of read queries to configured read slaves."""

    @classmethod
    def select(cls, *args, **kwargs):
        """
        Support read slaves.
        """
        query = super(Model, cls).select(*args, **kwargs)
        query.database = cls._get_read_database()
        return query

    @classmethod
    def raw(cls, *args, **kwargs):
        """
        Send a raw SQL query to the database. If was specified
        the `select` operator, then the query will be sent to
        the a suitable slave node.
        """
        query = super(Model, cls).raw(*args, **kwargs)
        # NOTE(review): relies on peewee's private `_sql` attribute — verify
        # it still exists on the pinned peewee version.
        if query._sql.lower().startswith('select'):
            query.database = cls._get_read_database()
        return query

    @property
    def pk(self):
        """
        Return primary key value.
        """
        return self._get_pk_value()

    @classmethod
    def get_or_none(cls, *args, **kwargs):
        # EAFP wrapper: return None instead of raising DoesNotExist.
        try:
            return cls.get(*args, **kwargs)
        except cls.DoesNotExist:
            return None

    def save(self, force_insert=False, **kwargs):
        """
        Invoke pre- and post-signals during saves.
        """
        # A row without a pk (or a forced insert) is treated as a creation.
        created = force_insert or not bool(self.pk)
        self.pre_save.send(self, created=created)
        super(Model, self).save(force_insert=force_insert, **kwargs)
        self.post_save.send(self, created=created)

    def delete_instance(self, *args, **kwargs):
        """
        Invoke pre- and post-signals during deleting an object.
        """
        self.pre_delete.send(self)
        super(Model, self).delete_instance(*args, **kwargs)
        self.post_delete.send(self)

    @classmethod
    def _get_read_database(cls):
        # Round-robin across configured read slaves; fall back to the
        # primary database when none are configured.
        if not cls._meta.read_slaves:
            return cls._meta.database
        current_idx = getattr(cls, '_read_slave_idx', -1)
        cls._read_slave_idx = (current_idx + 1) % len(cls._meta.read_slaves)
        return cls._meta.read_slaves[cls._read_slave_idx]
|
/sanic-pw-0.2.0.tar.gz/sanic-pw-0.2.0/sanic_pw/models.py
| 0.811713 | 0.178383 |
models.py
|
pypi
|
import time
from typing import Union, Any
from srf.cache.backends import cache as default_cache
from srf.exceptions import Throttled
class BaseThrottle:
    """
    Rate throttling of requests: subclasses decide whether each request
    is allowed.
    """

    async def allow_request(self, request, view):
        """
        Return `True` if the request should be allowed, `False` otherwise.
        """
        raise NotImplementedError('.allow_request() must be overridden')

    async def get_ident(self, request):
        """
        Use HTTP_X_FORWARDED_FOR to get REMOTE_ADDR,
        or use request.ip if it doesn't exist.
        """
        forwarded = request.headers.get('HTTP_X_FORWARDED_FOR', None)
        if forwarded:
            # Collapse all whitespace out of the forwarded-for chain.
            return ''.join(forwarded.split())
        return request.ip

    async def wait(self):
        """
        Optionally, return a recommended number of seconds to wait before
        the next request.
        """
        return None
class SimpleRateThrottle(BaseThrottle):
    """
    A simple cache implementation, that only requires `.get_cache_key()`
    to be overridden.
    The rate (requests / seconds) is set by a `rate` attribute on the Throttle
    class. The attribute is a string of the form 'number_of_requests/period'.
    Period should be one of: ('s', 'sec', 'm', 'min', 'h', 'hour', 'd', 'day')
    Previous request information used for throttling is stored in the cache.
    """
    # Async cache backend holding each client's request-timestamp history.
    cache = default_cache
    # Clock used for timestamps; a class attribute so tests can override it.
    timer = time.time
    # NOTE(review): cache_format is unused in this class's visible code —
    # presumably subclasses build keys with it; confirm against callers.
    cache_format = 'throttle_%(scope)s_%(ident)s'
    # Default: 100 requests per minute.
    rate = '100/min'

    def __init__(self, rate=None):
        if rate is not None:
            self.rate = rate
        # Parsed once up front: allowed request count and window length (s).
        self.num_requests, self.duration = self.parse_rate(self.rate)

    async def get_cache_key(self, request, view):
        """
        Should return a unique cache-key which can be used for throttling.
        May return `None` if the request should not be throttled.

        Default implementation keys on the client identity (IP / XFF).
        """
        return await self.get_ident(request)

    def parse_rate(self, rate) -> Union[tuple[None, None], tuple[int, Any]]:
        """
        Given the request rate string, return a two tuple of:
        <allowed number of requests>, <period of time in seconds>

        Returns ``(None, None)`` when *rate* is ``None`` (throttling off).
        """
        if rate is None:
            return None, None
        num, period = rate.split('/')
        num_requests = int(num)
        # Only the first letter of the period matters ('min' -> 'm').
        duration = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period[0]]
        return num_requests, duration

    async def allow_request(self, request, view):
        """
        Check whether the request is within the allowed rate.

        Returns ``True`` when allowed (also when throttling is disabled or
        no cache key applies); raises ``Throttled`` with a suggested wait
        time when the client exhausted its window.
        """
        if self.rate is None:
            return True
        self.key = await self.get_cache_key(request, view)
        if self.key is None:
            return True
        # History is a list of timestamps, newest first (index 0).
        self.history = await self.cache.get(self.key, [])
        self.now = self.timer()
        # Drop any requests from the history which have now passed the
        # throttle duration
        while self.history and self.history[-1] <= self.now - self.duration:
            self.history.pop()
        if len(self.history) >= self.num_requests:
            msg = 'Too many requests. Please try again later, Expected available in {wait} second.'
            raise Throttled(message=msg.format(wait=await self.wait()))
        self.history.insert(0, self.now)
        await self.cache.set(self.key, self.history, self.duration)
        return True

    async def wait(self):
        """
        Returns the recommended next request time in seconds.

        Based on when the oldest tracked request leaves the window,
        spread over the remaining request budget.  Must be called after
        ``allow_request`` has populated ``self.history``/``self.now``.
        """
        if self.history:
            remaining_duration = self.duration - (self.now - self.history[-1])
        else:
            remaining_duration = self.duration
        available_requests = self.num_requests - len(self.history) + 1
        if available_requests <= 0:
            return None
        return int(remaining_duration / float(available_requests))
|
/sanic-rest-framework-1.2.3.tar.gz/sanic-rest-framework-1.2.3/srf/throttling.py
| 0.896782 | 0.163279 |
throttling.py
|
pypi
|
from tortoise.models import Q
from srf.constant import LOOKUP_SEP
__all__ = ('ORMAndFilter', 'ORMOrFilter')
from srf.openapi.openapi import Parameters, Parameter
class ORMAndFilter:
    """Build a tortoise-orm ``Q`` filter that ANDs all search fields.

    Search fields may carry a lookup prefix (e.g. ``'>=age'`` maps to
    ``age__gte``); unprefixed fields filter on the bare column name.
    """

    # Prefix character(s) -> ORM lookup suffix.
    lookup_prefixes = {
        '^': 'istartswith',
        '$': 'iendswith',
        '>': 'gt',
        '<': 'lt',
        '>=': 'gte',
        '<=': 'lte',
        '=': 'contains',
        '@': 'icontains'
    }

    def __init__(self, request, view):
        """
        :param request: current request
        :param view: current view
        """
        self.view = view
        self.request = request

    def get_search_fields(self):
        """
        Return the view's ``search_fields`` (or ``None``).

        The request is always available on the instance, so subclasses
        may override this to vary the search fields per request.
        """
        return getattr(self.view, 'search_fields', None)

    @property
    def orm_filter(self):
        """
        Q object ANDing one sub-filter per configured search field.

        :return: Q object
        """
        search_fields = self.get_search_fields()
        if not search_fields:
            return Q()
        sub_filters = [Q(**self.construct_orm_filter(field)) for field in search_fields]
        return Q(*sub_filters)

    def dismantle_search_field(self, search_field):
        """
        Split *search_field* into ``(field_name, lookup_suffix)``.

        Fixed: prefixes are now matched only at the start of the string,
        and longer prefixes win.  The original tested substring
        containment in dict order, so ``'>=age'`` matched ``'>'`` first
        and produced the bogus field name ``'=age'``, and a prefix
        character anywhere in the field name also triggered stripping.
        """
        for prefix in sorted(self.lookup_prefixes, key=len, reverse=True):
            if search_field.startswith(prefix):
                return search_field[len(prefix):], self.lookup_prefixes[prefix]
        return search_field, None

    def construct_orm_filter(self, search_field):
        """
        Build the ORM filter kwargs for one search field.

        Returns ``{}`` when the request does not carry the parameter, so
        absent fields impose no constraint.
        """
        field_name, lookup_suffix = self.dismantle_search_field(search_field)
        args = self.request.args
        if field_name not in args:
            return {}
        if lookup_suffix:
            orm_lookup = LOOKUP_SEP.join([field_name, lookup_suffix])
        else:
            orm_lookup = field_name
        return {orm_lookup: self.get_filter_value(field_name)}

    def get_filter_value(self, field_name):
        """
        Fetch the request value for *field_name* as a single string.
        """
        values = self.request.args.get(field_name)
        return ''.join(values)

    @staticmethod
    def parameters(view) -> list:
        """Build OpenAPI query parameters for the view's search fields."""
        parameters = Parameters()
        search_fields = getattr(view, 'search_fields', None)
        if search_fields is None:
            return []
        for search_field in search_fields:
            field_name = search_field
            # Same longest-prefix-first, anchored matching as
            # dismantle_search_field (the original shared its substring bug).
            for prefix in sorted(ORMAndFilter.lookup_prefixes, key=len, reverse=True):
                if search_field.startswith(prefix):
                    field_name = search_field[len(prefix):]
                    break
            parameters.add(Parameter(field_name, 'string'))
        return parameters.parameters
class ORMOrFilter(ORMAndFilter):
    """Build a tortoise-orm ``Q`` filter that ORs all search fields.

    Same field syntax as :class:`ORMAndFilter`, but the per-field
    sub-filters are combined with OR instead of AND.
    """

    @property
    def orm_filter(self):
        """
        Q object ORing one sub-filter per configured search field.

        Fixed: the empty case now returns an (empty) ``Q`` object like
        the parent class, instead of leaking a bare list to callers that
        expect a ``Q``.

        :return: Q object
        """
        search_fields = self.get_search_fields()
        if not search_fields:
            return Q()
        sub_filters = [Q(**self.construct_orm_filter(field)) for field in search_fields]
        return Q(*sub_filters, join_type=Q.OR)
|
/sanic-rest-framework-1.2.3.tar.gz/sanic-rest-framework-1.2.3/srf/filters.py
| 0.595493 | 0.202581 |
filters.py
|
pypi
|
import copy
from typing import Dict
from srf.exceptions import ValidationException, ValidatorAssertError
__all__ = ['BaseValidator', 'MaxLengthValidator', 'MinLengthValidator', 'MaxValueValidator', 'MinValueValidator']
class BaseValidator:
    """Base class for reusable validators.

    Subclasses implement ``__call__`` and raise ``ValidationException``
    to signal failure.  Per-instance ``error_messages`` passed to
    ``__init__`` override the class-level defaults.
    """

    default_error_messages: Dict[str, str] = {
    }

    def __init__(self, error_messages: Dict[str, str] = None, code=None):
        # Merge custom messages over a copy of the class defaults so the
        # class-level dict is never mutated.
        self.error_messages = copy.copy(self.default_error_messages)
        if error_messages is not None:
            self.error_messages.update(copy.copy(error_messages))
        self.code = code

    def __call__(self, value, serializer=None):
        raise NotImplementedError('验证器必须重新定义 __call__()')

    def raise_error(self, key, **kws):
        """Format the message for *key* and raise ``ValidationException``.

        Fixed: look the message up in the merged ``self.error_messages``;
        the original read ``default_error_messages``, silently ignoring
        any custom messages supplied to ``__init__``.
        """
        msg = self.error_messages[key].format(**kws)
        raise ValidationException(msg, code=key)
class MaxLengthValidator(BaseValidator):
    """Validate that a sized value does not exceed ``max_length``."""

    default_error_messages: Dict[str, str] = {
        'max_length': '超出长度,最长支持{max_length}',
        'invalid': '无效的数据类型,数据类型只支持{datatypes}'
    }

    def __init__(self, max_length, **kwargs):
        if not isinstance(max_length, (int, float)):
            raise ValidatorAssertError('max_length的值只支持数值类型')
        self.max_length = max_length
        super(MaxLengthValidator, self).__init__(**kwargs)

    def __call__(self, value, serializer=None):
        # Fixed: accept tuples — the original listed ``type`` (the builtin
        # metaclass) instead of ``tuple``, so tuples were rejected while
        # classes slipped past the isinstance check only to crash len().
        if not isinstance(value, (str, list, dict, tuple)):
            self.raise_error('invalid', datatypes='str, list, dict, tuple')
        if len(value) > self.max_length:
            self.raise_error('max_length', max_length=self.max_length)
class MinLengthValidator(BaseValidator):
    """Validate that a sized value has at least ``min_length`` items."""

    default_error_messages: Dict[str, str] = {
        'min_length': '低于最低长度,最低为 {min_length}',
        'invalid': '无效的数据类型,数据类型只支持 {datatypes} '
    }

    def __init__(self, min_length, **kwargs):
        if not isinstance(min_length, (int, float)):
            raise ValidatorAssertError('min_length的值只支持数值类型')
        self.min_length = min_length
        super(MinLengthValidator, self).__init__(**kwargs)

    def __call__(self, value, serializer=None):
        # Fixed: accept tuples — same ``type``/``tuple`` typo as
        # MaxLengthValidator.
        if not isinstance(value, (str, list, dict, tuple)):
            self.raise_error('invalid', datatypes='str, list, dict, tuple')
        if len(value) < self.min_length:
            self.raise_error('min_length', min_length=self.min_length)
class MaxValueValidator(BaseValidator):
    """Validate that a numeric value is at most ``max_value``."""

    default_error_messages: Dict[str, str] = {
        'max_value': '超出最大值,最大值支持到{max_value}',
        'invalid': '无效的数据类型,数据类型只支持{datatypes}'
    }

    def __init__(self, max_value, **kwargs):
        # The bound itself must be numeric.
        if not isinstance(max_value, (int, float)):
            raise ValidatorAssertError('max_value的值只支持数值类型')
        self.max_value = max_value
        super().__init__(**kwargs)

    def __call__(self, value, serializer=None):
        if not isinstance(value, (int, float)):
            self.raise_error('invalid', datatypes='int, float')
        if value > self.max_value:
            self.raise_error('max_value', max_value=self.max_value)
class MinValueValidator(BaseValidator):
    """Validate that a numeric value is at least ``min_value``."""

    default_error_messages: Dict[str, str] = {
        'min_value': '低于最小值,最小值至少要为{min_value}',
        'invalid': '无效的数据类型,数据类型只支持{datatypes}'
    }

    def __init__(self, min_value, **kwargs):
        # The bound itself must be numeric.
        if not isinstance(min_value, (int, float)):
            raise ValidatorAssertError('min_value的值只支持数值类型')
        self.min_value = min_value
        super().__init__(**kwargs)

    def __call__(self, value, serializer=None):
        if not isinstance(value, (int, float)):
            self.raise_error('invalid', datatypes='int, float')
        if value < self.min_value:
            self.raise_error('min_value', min_value=self.min_value)
|
/sanic-rest-framework-1.2.3.tar.gz/sanic-rest-framework-1.2.3/srf/validators.py
| 0.617974 | 0.212191 |
validators.py
|
pypi
|
import datetime
import functools
import inspect
from decimal import Decimal
from urllib import parse
from tortoise.exceptions import IntegrityError
from srf.exceptions import APIException
_PROTECTED_TYPES = (
type(None), int, float, Decimal, datetime.datetime, datetime.date, datetime.time,
)
class ObjectDict(dict):
    """Dict whose items are also reachable as attributes.

    Assigning a plain ``dict`` wraps it in an ``ObjectDict`` so nested
    attribute access (``d.a.b``) keeps working.
    """

    def __getattr__(self, key):
        # Missing keys must surface as AttributeError (not KeyError) so
        # hasattr()/getattr(obj, k, default) behave normally.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key) from None

    def __setattr__(self, key, value):
        # Fixed: the original wrapped dict values in ObjectDict and then
        # unconditionally overwrote the entry with the raw value, making
        # the wrapping dead code.
        if isinstance(value, dict) and not isinstance(value, ObjectDict):
            value = ObjectDict(value)
        self[key] = value
def is_protected_type(obj):
    """Determine whether *obj* is an instance of a protected type.

    Objects of protected types are preserved as-is when passed to
    ``force_str(strings_only=True)``.
    """
    return isinstance(obj, _PROTECTED_TYPES)
def force_str(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Coerce *s* to ``str``, decoding ``bytes`` with *encoding*.

    If *strings_only* is True, protected (non-string-like) types such as
    ``None``, numbers and datetimes are returned unchanged.

    :raises APIException: when byte decoding fails.
    """
    # Handle the common case first, for performance reasons.
    if issubclass(type(s), str):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        s = str(s, encoding, errors) if isinstance(s, bytes) else str(s)
    except UnicodeDecodeError as e:
        # Fixed: chain the original decode error for debuggability — the
        # original bound it as ``e`` but never used it.
        raise APIException('{value}出现解码错误'.format(value=s)) from e
    return s
def replace_query_param(url, key, val):
    """
    Set or replace query parameter *key* with *val* in *url* and return
    the rebuilt URL.  Other parameters (including blank values) survive;
    the query string is re-emitted in sorted parameter order.
    """
    scheme, netloc, path, query, fragment = parse.urlsplit(force_str(url))
    params = parse.parse_qs(query, keep_blank_values=True)
    params[force_str(key)] = [force_str(val)]
    rebuilt_query = parse.urlencode(sorted(params.items()), doseq=True)
    return parse.urlunsplit((scheme, netloc, path, rebuilt_query, fragment))
class IntegrityErrorHandel:
    """Render a tortoise-orm ``IntegrityError`` as a readable message."""

    def __init__(self, exc: IntegrityError):
        self.exc = exc
        self.message = str(exc)

    def parse_error_str(self):
        """Translate known constraint failures into a message.

        For UNIQUE violations the offending column name is reported —
        assumes an SQLite-style "... table.column" message; confirm for
        other backends.
        """
        error = '发生错误:{}{}'
        if 'UNIQUE' in self.message:
            column = self.message.split('.')[-1]
            error = error.format(column, '已存在')
        return error

    def __str__(self):
        return self.parse_error_str()
async def run_awaitable(func, *args, **kwargs):
    """Call *func* with the given arguments, awaiting the call when
    *func* is a coroutine function."""
    if inspect.iscoroutinefunction(func):
        return await func(*args, **kwargs)
    return func(*args, **kwargs)
async def run_awaitable_val(value):
    """Return *value*, awaiting it first when it is awaitable."""
    if inspect.isawaitable(value):
        return await value
    return value
def is_callable(obj):
    """True for plain functions, methods and ``functools.partial``
    objects — deliberately narrower than the builtin ``callable``
    (builtins and arbitrary ``__call__`` objects return False)."""
    return (inspect.isfunction(obj)
            or inspect.ismethod(obj)
            or isinstance(obj, functools.partial))
|
/sanic-rest-framework-1.2.3.tar.gz/sanic-rest-framework-1.2.3/srf/utils.py
| 0.413596 | 0.218857 |
utils.py
|
pypi
|
from srf import mixins
from srf.exceptions import APIException
from srf.filters import ORMAndFilter
from srf.status import HttpStatus
from srf.views import APIView
__all__ = ('GenericAPIView', 'CreateAPIView', 'ListAPIView', 'RetrieveAPIView',
'DestroyAPIView', 'UpdateAPIView', 'ListCreateAPIView', 'RetrieveUpdateAPIView',
'RetrieveDestroyAPIView', 'RetrieveUpdateDestroyAPIView')
class GenericAPIView(APIView):
    """
    Generic view backed by a model queryset.

    Combine with the mixin classes for full CRUD behaviour; routing
    integrates with the Route component.
    """
    # Data source (a tortoise queryset).
    queryset = None
    # Serializer used for input validation and output rendering.
    serializer_class = None
    # URL keyword used to look a single object up.
    lookup_field = 'pk'
    # Optional paginator class; None disables pagination.
    pagination_class = None
    # Filter backend that builds the ORM filter from the request.
    filter_class = ORMAndFilter
    search_fields = None

    def __init__(self, *args, **kwargs):
        # Fixed: unpack the arguments — the original called
        # ``super().__init__(args, kwargs)``, passing the tuple and the
        # dict as two positional arguments.
        super().__init__(*args, **kwargs)

    async def get_object(self):
        """
        Return the single object the view operates on.

        Override for non-standard queryset lookups.

        :raises APIException: when no row matches the lookup value.
        """
        queryset = await self.get_queryset()
        lookup_field = self.lookup_field
        assert lookup_field in self.kwargs, f'{lookup_field} 不存在于 {self.__class__.__name__} 的 Url配置中的关键词内 '
        filter_kwargs = {lookup_field: self.kwargs[lookup_field]}
        obj = await queryset.get_or_none(**filter_kwargs)
        if obj is None:
            # NOTE(review): a 200 status for "not found" looks odd —
            # confirm whether 404 was intended.
            raise APIException(f'不存在{lookup_field}为{self.kwargs[lookup_field]}的数据', status=HttpStatus.HTTP_200_OK)
        # May raise a permission denied
        await self.check_object_permissions(self.request, obj)
        return obj

    async def get_queryset(self):
        """Return the base queryset with the request filter applied."""
        assert self.queryset is not None, (
            "'%s'应该包含一个'queryset'属性,"
            "或重写`get_queryset()`方法。"
            % self.__class__.__name__
        )
        filter_orm = await self.filter_orm()
        return self.queryset.filter(filter_orm)

    async def filter_orm(self):
        """Build the ORM filter object from the configured filter class."""
        return self.filter_class(self.request, self).orm_filter

    def get_serializer(self, *args, **kwargs):
        """
        Instantiate the serializer used to validate/deserialize input and
        serialize output, with the view context injected.
        """
        serializer_class = self.get_serializer_class()
        kwargs.setdefault('context', self.get_serializer_context())
        return serializer_class(*args, **kwargs)

    def get_serializer_class(self):
        """
        Return the serializer class (``self.serializer_class`` by
        default).  Override to vary serialization per request, e.g. a
        richer serializer for administrators.
        """
        assert self.serializer_class is not None, (
            "'%s' should either include a `serializer_class` attribute, "
            "or override the `get_serializer_class()` method."
            % self.__class__.__name__
        )
        return self.serializer_class

    def get_serializer_context(self):
        """Extra context handed to every serializer instance."""
        return {
            'request': self.request,
            'view': self
        }

    @property
    def paginator(self):
        """
        The paginator instance associated with the view, or `None`.
        Instantiated lazily and cached on the instance.
        """
        if not hasattr(self, '_paginator'):
            if self.pagination_class is None:
                self._paginator = None
            else:
                self._paginator = self.pagination_class()
        return self._paginator

    async def paginate_queryset(self, queryset):
        """
        Return a single page of results, or `None` if pagination is disabled.
        """
        if self.paginator is None:
            return None
        return await self.paginator.paginate_queryset(queryset, self.request, view=self)

    async def get_paginated_response(self, data):
        """Wrap *data* in the paginator's response envelope."""
        assert self.paginator is not None
        return await self.paginator.get_paginated_response(data)
class CreateAPIView(mixins.CreateModelMixin,
                    GenericAPIView):
    """
    Concrete view for creating a model instance.
    """
    async def post(self, request, *args, **kwargs):
        return await self.create(request, *args, **kwargs)
class ListAPIView(mixins.ListModelMixin,
                  GenericAPIView):
    """
    Concrete view for listing a queryset.
    """
    async def get(self, request, *args, **kwargs):
        return await self.list(request, *args, **kwargs)
class RetrieveAPIView(mixins.RetrieveModelMixin,
                      GenericAPIView):
    """
    Concrete view for retrieving a model instance.
    """
    async def get(self, request, *args, **kwargs):
        return await self.retrieve(request, *args, **kwargs)
class DestroyAPIView(mixins.DestroyModelMixin,
                     GenericAPIView):
    """
    Concrete view for deleting a model instance.
    """
    async def delete(self, request, *args, **kwargs):
        return await self.destroy(request, *args, **kwargs)
class UpdateAPIView(mixins.UpdateModelMixin,
                    GenericAPIView):
    """
    Concrete view for updating a model instance (PUT full update,
    PATCH partial update).
    """
    async def put(self, request, *args, **kwargs):
        return await self.update(request, *args, **kwargs)

    async def patch(self, request, *args, **kwargs):
        return await self.partial_update(request, *args, **kwargs)
class ListCreateAPIView(mixins.ListModelMixin,
                        mixins.CreateModelMixin,
                        GenericAPIView):
    """
    Concrete view for listing a queryset or creating a model instance.
    """
    async def get(self, request, *args, **kwargs):
        return await self.list(request, *args, **kwargs)

    async def post(self, request, *args, **kwargs):
        return await self.create(request, *args, **kwargs)
class RetrieveUpdateAPIView(mixins.RetrieveModelMixin,
                            mixins.UpdateModelMixin,
                            GenericAPIView):
    """
    Concrete view for retrieving or updating a model instance.
    """
    async def get(self, request, *args, **kwargs):
        return await self.retrieve(request, *args, **kwargs)

    async def put(self, request, *args, **kwargs):
        return await self.update(request, *args, **kwargs)

    async def patch(self, request, *args, **kwargs):
        return await self.partial_update(request, *args, **kwargs)
class RetrieveDestroyAPIView(mixins.RetrieveModelMixin,
                             mixins.DestroyModelMixin,
                             GenericAPIView):
    """
    Concrete view for retrieving or deleting a model instance.
    """
    async def get(self, request, *args, **kwargs):
        return await self.retrieve(request, *args, **kwargs)

    async def delete(self, request, *args, **kwargs):
        return await self.destroy(request, *args, **kwargs)
class RetrieveUpdateDestroyAPIView(mixins.RetrieveModelMixin,
                                   mixins.UpdateModelMixin,
                                   mixins.DestroyModelMixin,
                                   GenericAPIView):
    """
    Concrete view for retrieving, updating or deleting a model instance.
    """
    async def get(self, request, *args, **kwargs):
        return await self.retrieve(request, *args, **kwargs)

    async def put(self, request, *args, **kwargs):
        return await self.update(request, *args, **kwargs)

    async def patch(self, request, *args, **kwargs):
        return await self.partial_update(request, *args, **kwargs)

    async def delete(self, request, *args, **kwargs):
        return await self.destroy(request, *args, **kwargs)
|
/sanic-rest-framework-1.2.3.tar.gz/sanic-rest-framework-1.2.3/srf/generics.py
| 0.71889 | 0.248323 |
generics.py
|
pypi
|
class Parameter:
    """One OpenAPI operation parameter (query/header/path/cookie)."""

    def __init__(self, field_name: str, field_type='string', used_for='query', title=None, required=False):
        """
        :param field_name: parameter name as it appears in the request
        :param field_type: OpenAPI schema type
        :param used_for: parameter location (the OpenAPI ``in`` field)
        :param title: human-readable title; defaults to the field name
        :param required: whether the parameter is mandatory
        """
        self.field_name = field_name
        self.title = title or self.field_name
        # Fixed: ``field_type`` was assigned twice; keep a single assignment.
        self.field_type = field_type
        self.used_for = used_for
        self.required = required

    def to_dict(self):
        """Serialize to the OpenAPI parameter-object mapping."""
        return {
            "required": self.required,
            "schema": {
                "title": self.title,
                "type": self.field_type
            },
            "name": self.field_name,
            "in": self.used_for
        }
class Parameters:
    """Accumulates serialized OpenAPI parameter dicts."""

    def __init__(self):
        self._parameters = []

    def add(self, item: Parameter):
        """Serialize *item* immediately and append it to the collection."""
        self._parameters.append(item.to_dict())

    @property
    def parameters(self):
        """The accumulated list of parameter dicts."""
        return self._parameters
class ApiKeySecurity:
    """OpenAPI apiKey security-scheme description."""

    def __init__(self, name='Token', used_for='header'):
        self.name = name
        self.used_for = used_for

    def to_dict(self):
        """Serialize to the OpenAPI security-scheme mapping."""
        return {'type': 'apiKey', 'in': self.used_for, 'name': self.name}
class PropItem:
    """OpenAPI schema property (a scalar field description)."""

    def __init__(self, title, field_type, field_format=None):
        self.title = title
        self.field_type = field_type
        self.field_format = field_format

    def to_dict(self):
        """Serialize; ``format`` is emitted only when set."""
        result = {"title": self.title, "type": self.field_type}
        if self.field_format:
            result["format"] = self.field_format
        return result
class ObjectItem:
    """OpenAPI object schema with named properties."""

    def __init__(self, title):
        self.title = title
        self.field_type = 'object'
        self.properties = {}
        self.required = []

    def add(self, field_name, field, required=False):
        """Register *field* under *field_name*; optionally mark it required."""
        self.properties[field_name] = field
        if required:
            self.required.append(field_name)

    def to_dict(self):
        """Serialize; ``required`` is emitted only when non-empty."""
        result = {
            "title": self.title,
            "type": self.field_type,
            "properties": self.properties,
        }
        if self.required:
            result["required"] = self.required
        return result
class ArrayItem:
    """OpenAPI array schema wrapping an item schema."""

    def __init__(self, title, items):
        self.title = title
        self.field_type = 'array'
        self.items = items

    def to_dict(self):
        """Serialize to the OpenAPI array-schema mapping."""
        return {"title": self.title, "type": self.field_type, "items": self.items}
|
/sanic-rest-framework-1.2.3.tar.gz/sanic-rest-framework-1.2.3/srf/openapi/openapi.py
| 0.839865 | 0.200479 |
openapi.py
|
pypi
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.