ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a38c9a93636592aec4eb66d3c750c48e026bbfd | # -*- coding: utf-8 -*-
days = int(input())
print("%d ano(s)" %( days // 365 ))
print("%d mes(es)" %( days % 365 // 30 ))
print("%d dia(s)" %( days % 365 % 30 )) |
py | 1a38c9f5c5855b5876bf46c20a48c49171f8b47e | # coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: release-2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1VsphereVirtualDiskVolumeSource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fs_type': 'str',
'storage_policy_id': 'str',
'storage_policy_name': 'str',
'volume_path': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'storage_policy_id': 'storagePolicyID',
'storage_policy_name': 'storagePolicyName',
'volume_path': 'volumePath'
}
def __init__(self, fs_type=None, storage_policy_id=None, storage_policy_name=None, volume_path=None): # noqa: E501
"""V1VsphereVirtualDiskVolumeSource - a model defined in Swagger""" # noqa: E501
self._fs_type = None
self._storage_policy_id = None
self._storage_policy_name = None
self._volume_path = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if storage_policy_id is not None:
self.storage_policy_id = storage_policy_id
if storage_policy_name is not None:
self.storage_policy_name = storage_policy_name
self.volume_path = volume_path
@property
def fs_type(self):
"""Gets the fs_type of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:return: The fs_type of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1VsphereVirtualDiskVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:param fs_type: The fs_type of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def storage_policy_id(self):
"""Gets the storage_policy_id of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. # noqa: E501
:return: The storage_policy_id of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._storage_policy_id
@storage_policy_id.setter
def storage_policy_id(self, storage_policy_id):
"""Sets the storage_policy_id of this V1VsphereVirtualDiskVolumeSource.
Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. # noqa: E501
:param storage_policy_id: The storage_policy_id of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:type: str
"""
self._storage_policy_id = storage_policy_id
@property
def storage_policy_name(self):
"""Gets the storage_policy_name of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
Storage Policy Based Management (SPBM) profile name. # noqa: E501
:return: The storage_policy_name of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._storage_policy_name
@storage_policy_name.setter
def storage_policy_name(self, storage_policy_name):
"""Sets the storage_policy_name of this V1VsphereVirtualDiskVolumeSource.
Storage Policy Based Management (SPBM) profile name. # noqa: E501
:param storage_policy_name: The storage_policy_name of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:type: str
"""
self._storage_policy_name = storage_policy_name
@property
def volume_path(self):
"""Gets the volume_path of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
Path that identifies vSphere volume vmdk # noqa: E501
:return: The volume_path of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._volume_path
@volume_path.setter
def volume_path(self, volume_path):
"""Sets the volume_path of this V1VsphereVirtualDiskVolumeSource.
Path that identifies vSphere volume vmdk # noqa: E501
:param volume_path: The volume_path of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
:type: str
"""
if volume_path is None:
raise ValueError("Invalid value for `volume_path`, must not be `None`") # noqa: E501
self._volume_path = volume_path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1VsphereVirtualDiskVolumeSource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1VsphereVirtualDiskVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
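# A minimal usage sketch, not part of the generated module; the volume path and
# filesystem type below are illustrative values only.
if __name__ == "__main__":
    example_volume = V1VsphereVirtualDiskVolumeSource(
        volume_path="[datastore1] volumes/example.vmdk", fs_type="ext4")
    print(example_volume.to_dict())
    # {'fs_type': 'ext4', 'storage_policy_id': None,
    #  'storage_policy_name': None, 'volume_path': '[datastore1] volumes/example.vmdk'}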
|
py | 1a38ca94d79d8b52218175d657347bc66703d6a6 | from pydantic import BaseModel
from typing import List
from datetime import datetime
# models based on response model of /GET branding/api/v1/public/branding
class BrandingColorDetail(BaseModel):
type: str
rgba: str
class BrandingColor(BaseModel):
type: str
colorDetails: List[BrandingColorDetail]
class BrandingFile(BaseModel):
size: str
url: str
class BrandingImage(BaseModel):
type: str
files: List[BrandingFile]
class BrandingLanguageText(BaseModel):
languageTag: str
content: str
class BrandingText(BaseModel):
type: str
languages: List[BrandingLanguageText]
# Branding as returned by API
class Branding(BaseModel):
createdAt: datetime
changedAt: datetime
productName: str
colors: List[BrandingColor]
colorizeHeader: bool
images: List[BrandingImage]
texts: List[BrandingText]
imprintUrl: str
privacyUrl: str
supportUrl: str
emailContact: str
emailSender: str
positionLoginBox: int
appearanceLoginBox: str
# Generic uploaded image response
class BrandingImageUpload(BaseModel):
type: str
id: int
# Payload to update branding
class BrandingUpload(BaseModel):
appearanceLoginBox: str
colorizeHeader: bool
colors: List[BrandingColor]
emailContact: str
emailSender: str
images: List[BrandingImageUpload]
imprintUrl: str
positionLoginBox: int
privacyUrl: str
productName: str
supportUrl: str
texts: List[BrandingText]
class ImageResponse(BaseModel):
id: int
createdAt: datetime
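# A minimal usage sketch, not part of the original module; the values are
# illustrative and validation follows pydantic's standard behaviour.
if __name__ == "__main__":
    detail = BrandingColorDetail(type="light", rgba="rgba(0,51,102,1)")
    color = BrandingColor(type="primary", colorDetails=[detail])
    print(color.dict())
    # {'type': 'primary', 'colorDetails': [{'type': 'light', 'rgba': 'rgba(0,51,102,1)'}]}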
|
py | 1a38cb501e9a4d9673db3c72ca70ba2f57613946 | import warnings
from datetime import timedelta
from string import digits
from typing import Union
CHAR_TO_RU_STR = {'y': ('лет', 'год', 'года'),
'M': ('Месяцев', 'Месяц', 'Месяца'),
'w': ('недель', 'неделя', 'недели'),
'd': ('дней', 'день', 'дня'),
'h': ('часов', 'час', 'часа'),
'm': ('минут', 'минута', 'минуты'),
's': ('секунд', 'секунда', 'секунды')}
CHAR_TO_SEC = {'y': 31536000, 'M': 2592000, 'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1,
'г': 31536000, 'л': 31536000, 'М': 2592000, 'н': 604800, 'д': 86400, 'ч': 3600, 'м': 60, 'с': 1}
CHAR_TO_SEC_KEYS = set(CHAR_TO_SEC.keys()) # speeds up parsing when checking keys
STR_TO_SEC = {'years': 31536000, 'months': 2592000, 'weeks': 604800,
'days': 86400, 'hours': 3600, 'minutes': 60, 'seconds': 1}
def _get_times(digit: Union[int, float], tm: str) -> Union[str, None]:
digit = round(digit)
if digit == 0:
return None
tmp = digit % 100
if 11 <= tmp <= 19:
return f"{digit} {CHAR_TO_RU_STR[tm][0]}"
tmp = digit % 10
if tmp == 1:
return f"{digit} {CHAR_TO_RU_STR[tm][1]}"
if 2 <= tmp <= 4:
return f"{digit} {CHAR_TO_RU_STR[tm][2]}"
if tmp == 0 or 5 <= tmp <= 9:
return f"{digit} {CHAR_TO_RU_STR[tm][0]}"
return f"{digit} {CHAR_TO_RU_STR[tm][2]}"
def human_parser(s: str) -> int:
tmp_digit: str = ''
seconds: int = 0
for char in s:
if char in digits:
tmp_digit += char
elif tmp_digit and char in CHAR_TO_SEC_KEYS:
seconds += int(tmp_digit) * CHAR_TO_SEC[char]
tmp_digit = ''
return seconds
class Sec2Hum:
__slots__ = ['years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'string']
def __init__(self, seconds: Union[int, float, timedelta]):
if isinstance(seconds, int) or isinstance(seconds, float):
seconds = abs(seconds)
elif isinstance(seconds, timedelta):
seconds = seconds.total_seconds()
else:
raise TypeError
if seconds == 0:
self.seconds = 0
self.string = '0 секунд'
else:
for k, v in STR_TO_SEC.items():
self.__setattr__(k, seconds // v)
seconds %= v
self.string = " ".join(filter(None, (_get_times(self.years, 'y'),
_get_times(self.months, 'M'),
_get_times(self.weeks, 'w'),
_get_times(self.days, 'd'),
_get_times(self.hours, 'h'),
_get_times(self.minutes, 'm'),
_get_times(self.seconds, 's'))))
def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
return f"{self.__class__} {self.string}"
class Hum2Sec:
"""
:var self.seconds:
:type self.seconds: int
"""
__seconds: int
__timedelta: timedelta
def __init__(self, string: str):
"""
:param string: time-string to parse.
:type string: str.
"""
self.string = string
self.calculate()
def calculate(self):
if self.string.isdigit():
self.__seconds = int(self.string)
try:
self.__timedelta = timedelta(seconds=self.__seconds)
except OverflowError:
self.__timedelta = timedelta(seconds=999999999)
else:
self.__seconds = human_parser(self.string)
try:
self.__timedelta = timedelta(seconds=self.__seconds)
except OverflowError:
self.__timedelta = timedelta(seconds=999999999)
@property
def seconds(self):
return self.__seconds
@seconds.setter
def seconds(self, value):
raise ValueError
@property
def time_dlt(self):
return self.__timedelta
@time_dlt.setter
def time_dlt(self, value):
raise ValueError
@property
def delta(self):
"""
Deprecated, use time_dlt instead.
:return:
"""
warnings.warn("Hum2Sec.delta deprecated, use Hum2Sec.time_dlt instead.", DeprecationWarning, stacklevel=2)
return self.__timedelta
@delta.setter
def delta(self, value):
raise ValueError
def __str__(self) -> str:
return str(self.__seconds)
def __repr__(self) -> str:
return f"{self.__class__} {self.__seconds}"
|
py | 1a38cb8a5b5f37ed89b7163b66d00bb510a8167f | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import user_interest
from google.ads.googleads.v9.services.types import user_interest_service
from .transports.base import UserInterestServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import UserInterestServiceGrpcTransport
class UserInterestServiceClientMeta(type):
"""Metaclass for the UserInterestService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[UserInterestServiceTransport]]
_transport_registry["grpc"] = UserInterestServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[UserInterestServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class UserInterestServiceClient(metaclass=UserInterestServiceClientMeta):
"""Service to fetch Google Ads User Interest."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserInterestServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserInterestServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> UserInterestServiceTransport:
"""Return the transport used by the client instance.
Returns:
UserInterestServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def user_interest_path(customer_id: str, user_interest_id: str,) -> str:
"""Return a fully-qualified user_interest string."""
return "customers/{customer_id}/userInterests/{user_interest_id}".format(
customer_id=customer_id, user_interest_id=user_interest_id,
)
@staticmethod
def parse_user_interest_path(path: str) -> Dict[str, str]:
"""Parse a user_interest path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/userInterests/(?P<user_interest_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, UserInterestServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the user interest service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.UserInterestServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, UserInterestServiceTransport):
# transport is a UserInterestServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = UserInterestServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_user_interest(
self,
request: Union[
user_interest_service.GetUserInterestRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> user_interest.UserInterest:
r"""Returns the requested user interest in full detail
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetUserInterestRequest, dict]):
The request object. Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v9.services.UserInterestService.GetUserInterest].
resource_name (:class:`str`):
Required. Resource name of the
UserInterest to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.UserInterest:
A user interest: a particular
interest-based vertical to be targeted.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a user_interest_service.GetUserInterestRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, user_interest_service.GetUserInterestRequest
):
request = user_interest_service.GetUserInterestRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_user_interest
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("UserInterestServiceClient",)
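# A minimal usage sketch, not part of the generated client; the IDs are illustrative
# and the path helpers are pure string formatting, so no credentials are needed.
if __name__ == "__main__":
    path = UserInterestServiceClient.user_interest_path("1234567890", "42")
    print(path)  # customers/1234567890/userInterests/42
    print(UserInterestServiceClient.parse_user_interest_path(path))
    # {'customer_id': '1234567890', 'user_interest_id': '42'}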
|
py | 1a38cc151cf5cc0f518cb53f03f97b2a76bc3234 | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
import filecmp
import os
import yaml
import tarfile
import shutil
import sys
import logging
import logging.config
import bootstrap
class TestBootstrap(unittest.TestCase):
"""
Test the bootstrap's api
"""
def setUp(self):
try:
os.chdir(os.path.abspath("test"))
except:
pass
configuration_path = "test_logging.yaml"
if os.path.exists(configuration_path):
with open(configuration_path, 'rt') as f:
logging_configuration = yaml.safe_load(f.read())
logging.config.dictConfig(logging_configuration)
logging.getLogger()
def tearDown(self):
try:
os.chdir(os.path.abspath(".."))
except:
pass
# option_validation: Correct Option
def test_option_validation_correct_option(self):
class Object(object):
pass
args = Object()
# sudo ./bootstrap.py -p yourclusterconfig.yaml -a deploy
# Target: True
args.path = "testpath"
args.action = "deploy"
args.file = None
self.assertTrue(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -a clean
# Target: True
args.path = "testpath"
args.action = "clean"
args.file = None
self.assertTrue(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -a install_kubectl
# Target: True
args.path = "testpath"
args.action = "install_kubectl"
args.file = None
self.assertTrue(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -f yournodelist.yaml -a add
# Target: True
args.path = "testpath"
args.action = "add"
args.file = "testfile"
self.assertTrue(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -f yournodelist.yaml -a remove
# Target: True
args.path = "testpath"
args.action = "remove"
args.file = "testfile"
self.assertTrue(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -f yournodelist.yaml -a repair
# Target: True
args.path = "testpath"
args.action = "repair"
args.file = "testfile"
self.assertTrue(bootstrap.option_validation(args))
# option_validation: Missing option
def test_option_validation_missing_option(self):
class Object(object):
pass
args = Object()
# sudo ./bootstrap.py -p yourclusterconfig.yaml -a add
# Target: False
args.path = "testpath"
args.action = "add"
args.file = None
self.assertFalse(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -a remove
# Target: False
args.path = "testpath"
args.action = "remove"
args.file = None
self.assertFalse(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -a repair
# Target: False
args.path = "testpath"
args.action = "repair"
args.file = None
self.assertFalse(bootstrap.option_validation(args))
# option_validation: Wrong combination
def test_option_validation_wrong_combination(self):
class Object(object):
pass
args = Object()
# sudo ./bootstrap.py -p yourclusterconfig.yaml -f yournodelist.yaml -a deploy
# Target: False
args.path = "testpath"
args.action = "deploy"
args.file = "testfile"
self.assertFalse(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -f yournodelist.yaml -a clean
# Target: False
args.path = "testpath"
args.action = "clean"
args.file = "testfile"
self.assertFalse(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -f yournodelist.yaml -a install_kubectl
# Target: False
args.path = "testpath"
args.action = "install_kubectl"
args.file = "testfile"
self.assertFalse(bootstrap.option_validation(args))
# option_validation: Non-existent option
def test_option_validation_non_existent_option(self):
class Object(object):
pass
args = Object()
# sudo ./bootstrap.py -p yourclusterconfig.yaml -f yournodelist.yaml -a false
# Target: False
args.path = "testpath"
args.action = "false"
args.file = "testfile"
self.assertFalse(bootstrap.option_validation(args))
# sudo ./bootstrap.py -p yourclusterconfig.yaml -a false
# Target: False
args.path = "testpath"
args.action = "false"
args.file = None
self.assertFalse(bootstrap.option_validation(args))
if __name__ == '__main__':
unittest.main()
|
py | 1a38ccec8914b187623054038cc567540bfb02de | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d, Sequential, ModuleList, ReLU
from src.ssd import SSD
from src import rfb_config
from src import config
rfb_config.define_img_size(config.NETWORK_INPUT_SIZE)
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, bn=True):
super(BasicConv, self).__init__()
self.out_channels = out_planes
if bn:
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
self.relu = nn.ReLU(inplace=True) if relu else None
else:
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=True)
self.bn = None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class BasicRFB(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, scale=0.1, map_reduce=8, vision=1, groups=1):
super(BasicRFB, self).__init__()
self.scale = scale
self.out_channels = out_planes
inter_planes = in_planes // map_reduce
self.branch0 = nn.Sequential(
BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3),
stride=stride, padding=(1, 1), groups=groups),
BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1,
padding=vision + 1, dilation=vision + 1, relu=False, groups=groups)
)
self.branch1 = nn.Sequential(
BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3),
stride=stride, padding=(1, 1), groups=groups),
BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1,
padding=vision + 2, dilation=vision + 2, relu=False, groups=groups)
)
self.branch2 = nn.Sequential(
BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
BasicConv(inter_planes, (inter_planes // 2) * 3, kernel_size=3,
stride=1, padding=1, groups=groups),
BasicConv((inter_planes // 2) * 3, 2 * inter_planes, kernel_size=3,
stride=stride, padding=1, groups=groups),
BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1,
padding=vision + 4, dilation=vision + 4, relu=False, groups=groups)
)
self.ConvLinear = BasicConv(6 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
out = self.ConvLinear(out)
short = self.shortcut(x)
out = out * self.scale + short
out = self.relu(out)
return out
class RFB(nn.Module):
def __init__(self, num_classes=2, reduced=False):
super(RFB, self).__init__()
self.base_channel = 8 * 2
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True))
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True))
self.model = nn.Sequential(
conv_bn(3, self.base_channel, 2), # 160*120
conv_dw(self.base_channel, self.base_channel * 2, 1),
conv_dw(self.base_channel * 2, self.base_channel * 2, 2), # 80*60
conv_dw(self.base_channel * 2, self.base_channel * 2, 1),
conv_dw(self.base_channel * 2, self.base_channel * 4, 2), # 40*30
conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
conv_dw(self.base_channel * 4, self.base_channel * 4, 1) if reduced else BasicRFB(self.base_channel * 4, self.base_channel * 4, stride=1, scale=1.0),
conv_dw(self.base_channel * 4, self.base_channel * 8, 2), # 20*15
conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
conv_dw(self.base_channel * 8, self.base_channel * 16, 2), # 10*8
conv_dw(self.base_channel * 16, self.base_channel * 16, 1)
)
self.fc = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.model(x)
x = F.avg_pool2d(x, 7)
x = x.view(-1, 1024)
x = self.fc(x)
return x
def deepPointwiseConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0):
"""Replace Conv2d with a depthwise Conv2d and Pointwise Conv2d."""
return Sequential(
Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size,
groups=in_channels, stride=stride, padding=padding),
ReLU(),
Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1))
def create_net(num_classes, is_test=False, device="cuda", reduced=False):
base_net = RFB(2, reduced=reduced)
base_net_model = base_net.model # disable dropout layer
source_layer_indexes = [8, 11, 13]
extras = ModuleList([
Sequential(
Conv2d(in_channels=base_net.base_channel * 16, out_channels=base_net.base_channel * 4, kernel_size=1),
ReLU(),
deepPointwiseConv2d(in_channels=base_net.base_channel * 4,
out_channels=base_net.base_channel * 16, kernel_size=3, stride=2, padding=1),
ReLU()
)
])
regression_headers = ModuleList([
deepPointwiseConv2d(in_channels=base_net.base_channel * 4, out_channels=3 * 4, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 8, out_channels=2 * 4, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 16, out_channels=2 * 4, kernel_size=3, padding=1),
Conv2d(in_channels=base_net.base_channel * 16, out_channels=3 * 4, kernel_size=3, padding=1)
])
classification_headers = ModuleList([
deepPointwiseConv2d(in_channels=base_net.base_channel * 4, out_channels=3 * num_classes, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 8, out_channels=2 * num_classes, kernel_size=3, padding=1),
deepPointwiseConv2d(in_channels=base_net.base_channel * 16, out_channels=2 * num_classes, kernel_size=3, padding=1),
Conv2d(in_channels=base_net.base_channel * 16, out_channels=3 * num_classes, kernel_size=3, padding=1)
])
return SSD(num_classes, base_net_model, source_layer_indexes,
extras, classification_headers, regression_headers, is_test=is_test, config=rfb_config, device=device)
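# A minimal shape-check sketch, not part of the original module; the tensor size is
# illustrative. It shows that BasicRFB (stride=1) and the depthwise/pointwise helper
# preserve spatial dimensions while changing the channel count.
if __name__ == "__main__":
    x = torch.randn(1, 64, 40, 30)
    print(BasicRFB(64, 64)(x).shape)  # torch.Size([1, 64, 40, 30])
    block = deepPointwiseConv2d(64, 128, kernel_size=3, padding=1)
    print(block(x).shape)             # torch.Size([1, 128, 40, 30])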
|
py | 1a38cd984ad960c926c417aa7c02109b611e9529 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
test_printer.py
Test printer.
"""
from ydkgen.common import iscppkeyword
from ydkgen.builder import TestBuilder
from ydkgen.builder import FixtureBuilder
from .test_fixture_printer import FixturePrinter
from ydkgen.common import get_top_class, get_element_path, get_path_sep, get_obj_name, \
get_qn, is_reference_prop, is_terminal_prop, is_empty_prop, \
is_identity_prop, is_decimal64_prop
_IGNORE_TESTS = set({'ietf_netconf_acm'})
class TestPrinter(FixturePrinter):
"""Test printer."""
def __init__(self, ctx, lang):
super(TestPrinter, self).__init__(ctx, lang)
def print_tests(self, package, identity_subclasses):
"""Print all test case."""
self.package = package
self.identity_subclasses = identity_subclasses
test_builder = TestBuilder(self.lang, identity_subclasses)
fixture_builder = FixtureBuilder(self.lang, identity_subclasses)
test_builder.build_test(package)
imports = fixture_builder.get_imports(package, test_builder)
self.print_fixture_head(package, imports)
if package.name not in _IGNORE_TESTS:
self._print_test_case(package, imports, test_builder)
self.print_fixture_tail(package)
def _print_test_case(self, package, imports, test_builder):
"""Print a single test case."""
for test_case in test_builder.test_cases:
stmts = test_case.stmts
test_name = test_case.test_name
clazz = test_case.clazz
top_classes = list(test_case.ref_top_classes.values())
self._print_test_case_header(test_name)
self._print_test_case_body(stmts, clazz, top_classes)
self._print_test_case_trailer()
def _print_test_case_body(self, stmts, clazz, top_classes):
self._print_test_case_requisites(stmts)
self._print_test_case_crud_stmts(stmts, clazz, top_classes)
self._print_test_case_cleanup(clazz, top_classes)
self._print_test_case_compare(clazz)
def _print_test_case_requisites(self, stmts):
self._print_requsite_declarations(stmts)
self._print_requisite_stmts(stmts)
self._print_unadjust_leaflist_append_stmts(stmts)
self._print_requisite_reference_stmts(stmts)
self._print_requisite_adjustments(stmts)
self._print_requisite_leaflist_adjusted(stmts)
def _print_requsite_declarations(self, stmts):
for path, val in stmts.declaration_stmts.items():
self._write_end(self.declaration_fmt.format(path, val))
def _print_unadjust_leaflist_append_stmts(self, stmts):
for path, val in stmts.unadjusted_leaflist_appends:
self._write_end(self.leaflist_append_fmt.format(path, val))
def _print_requisite_stmts(self, stmts):
sorted_paths = sorted(list(stmts.append_stmts.keys()) +
list(stmts.assignment_stmts.keys()))
for path in sorted_paths:
if path in stmts.append_stmts:
self._print_requisite_list_append(stmts, path)
elif path in stmts.assignment_stmts:
self._print_requisite_assignment(stmts, path)
def _print_requisite_list_append(self, stmts, path):
val = stmts.append_stmts[path]
self._print_requisite_list_parent_pointer(path, val)
self._write_end(self.append_fmt.format(path, val))
def _print_requisite_list_parent_pointer(self, path, val):
# parent pointer is set by YList append method in Python,
# no need to print
if self.lang == 'cpp' and self.sep in path:
parent = self.sep.join(path.split(self.sep)[:-1])
parent_path = self.sep.join([val, 'parent'])
self._write_end(self.cpp_leaf_fmt.format(parent_path, parent))
def _print_requisite_assignment(self, stmts, path):
val = stmts.assignment_stmts[path]
fmt = self.get_assignment_fmt(path)
self._write_end(fmt.format(path, val))
def _print_requisite_reference_stmts(self, stmts):
for path in sorted(stmts.reference_stmts):
val = stmts.reference_stmts[path]
self._write_end(self.ref_fmt.format(path, val))
def _print_requisite_adjustments(self, stmts):
for path in sorted(stmts.adjustment_stmts):
val = stmts.adjustment_stmts[path]
self._write_end(self.ref_fmt.format(path, val))
for path in sorted(stmts.reference_adjustment_stmts):
val = stmts.reference_adjustment_stmts[path]
self._write_end(self.ref_fmt.format(path, val))
def _print_requisite_leaflist_adjusted(self, stmts):
for path, val in stmts.adjusted_leaflist_appends.items():
self._write_end(self.leaflist_append_fmt.format(path, val))
def _print_test_case_crud_stmts(self, stmts, clazz, top_classes):
for top_class in top_classes:
self._print_crud_create_stmts(top_class)
top_class = get_top_class(clazz)
self._print_crud_create_stmts(top_class)
self._print_crud_read_stmts(top_class)
def _print_crud_create_stmts(self, top_class):
top_obj_name = get_obj_name(top_class)
self._print_logging('Creating {}...'.format(top_obj_name))
fmt = self._get_crud_fmt('create')
self._write_end(fmt.format(top_obj_name))
def _print_crud_read_stmts(self, top_class):
top_obj_name = get_obj_name(top_class)
read_obj_name = '{}_read'.format(top_obj_name)
filter_obj_name = '{}_filter'.format(top_obj_name)
qn = get_qn(self.lang, top_class)
self._print_logging('Reading {}...'.format(top_obj_name))
self._write_end(self.declaration_fmt.format(filter_obj_name, qn))
fmt = self._get_crud_fmt('read')
stmt = fmt.format(filter_obj_name)
fmt = self.read_ret_fmt
if self.lang == 'py':
self._write_end(fmt.format(read_obj_name, stmt))
elif self.lang == 'cpp':
self._write_end('auto read_unique_ptr = {}'.format(stmt))
self._write_end('CHECK( read_unique_ptr != nullptr)')
self._write_end(fmt.format(read_obj_name, qn, 'read_unique_ptr'))
def _print_test_case_cleanup(self, clazz, top_classes):
self._print_crud_delete_stmts(clazz)
for clazz in top_classes:
self._print_crud_delete_stmts(clazz)
def _print_crud_delete_stmts(self, clazz):
top_class = get_top_class(clazz)
top_obj_name = get_obj_name(top_class)
fmt = self._get_crud_fmt('delete')
self._print_logging('Deleting {}...'.format(top_obj_name))
self._write_end(fmt.format(top_obj_name))
def _print_test_case_compare(self, clazz):
self._print_logging('Comparing leaf/leaf-lists...')
for prop in clazz.properties():
if is_reference_prop(prop) or is_terminal_prop(prop):
# unable to compare empty
# read object will not be assigned to Empty() automatically
if not is_empty_prop(prop):
self._print_compare_stmt(prop)
def _print_compare_stmt(self, prop):
if is_identity_prop(prop) or is_decimal64_prop(prop):
# unable to compare decimal64 in Python
# unable to compare identity in C++ and Python
return
lhs = self._get_element_path(prop)
top_class_name, path = lhs.split(self.sep, 1)
top_class_name = '{}_read'.format(top_class_name)
rhs = self.sep.join([top_class_name, path])
self._write_end(self.compare_fmt.format(lhs, rhs))
def _print_test_case_header(self, test_name):
if self.lang == 'py':
self._writeln('def test_{}s(self):'.format(test_name))
elif self.lang == 'cpp':
self._writeln('TEST_CASE_METHOD( ConnectionFixture, "{}_{}_test" )'.format(self.package.name, test_name))
self._writeln('{')
self._lvl_inc()
self._lvl_inc()
def _print_test_case_trailer(self):
self._lvl_dec()
if self.lang == 'py':
self._bline()
elif self.lang == 'cpp':
self._lvl_dec()
self._writeln('}')
self._bline()
def _print_logging(self, msg):
self._bline()
if self.lang == 'py':
self._write_end('logger.info("{}")'.format(msg))
def get_assignment_fmt(self, path):
fmt = '{} = {}'
if self.sep not in path and self.lang == 'cpp':
fmt = 'auto {} = {}'
return fmt
def _get_crud_fmt(self, oper):
if self.lang == 'py':
fmt = 'self.crud.{}(self.ncc, {{}})'.format(oper)
elif self.lang == 'cpp':
if iscppkeyword(oper):
oper = '{}_'.format(oper)
fmt = 'm_crud.{}(*m_provider, *{{}})'.format(oper)
return fmt
@property
def declaration_fmt(self):
fmt = '{} = {}()'
if self.lang == 'cpp':
fmt = 'auto {} = std::make_unique<{}>()'
return fmt
@property
def leaflist_append_fmt(self):
fmt = '{}.append({})'
if self.lang == 'cpp':
fmt = '{}.append(std::move({}))'
return fmt
@property
def append_fmt(self):
fmt = '{}.append({})'
if self.lang == 'cpp':
fmt = '{}.emplace_back(std::move({}))'
return fmt
@property
def cpp_leaf_fmt(self):
return '{} = {}.get()'
@property
def ref_fmt(self):
fmt = '{} = {}'
if self.lang == 'cpp':
fmt = '{} = {}.get()'
return fmt
@property
def compare_fmt(self):
fmt = 'self.assertEqual({}, {})'
if self.lang == 'cpp':
fmt = 'CHECK( {} == {} )'
return fmt
@property
def read_ret_fmt(self):
fmt = '{} = {}'
if self.lang == 'cpp':
fmt = 'auto {} = dynamic_cast<{}*>({}.get())'
return fmt
def _get_element_path(self, element):
return get_element_path(self.lang, element)
@property
def sep(self):
return get_path_sep(self.lang)
|
py | 1a38ce9345b9c8d3ad8274687f1d08c23a42a06f | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .delegation_set import *
from .get_delegation_set import *
from .get_resolver_endpoint import *
from .get_resolver_rule import *
from .get_resolver_rules import *
from .get_zone import *
from .health_check import *
from .hosted_zone_dns_sec import *
from .key_signing_key import *
from .query_log import *
from .record import *
from .resolver_dns_sec_config import *
from .resolver_endpoint import *
from .resolver_firewall_domain_list import *
from .resolver_firewall_rule_group import *
from .resolver_query_log_config import *
from .resolver_query_log_config_association import *
from .resolver_rule import *
from .resolver_rule_association import *
from .vpc_association_authorization import *
from .zone import *
from .zone_association import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from .. import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "aws:route53/delegationSet:DelegationSet":
return DelegationSet(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/healthCheck:HealthCheck":
return HealthCheck(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/hostedZoneDnsSec:HostedZoneDnsSec":
return HostedZoneDnsSec(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/keySigningKey:KeySigningKey":
return KeySigningKey(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/queryLog:QueryLog":
return QueryLog(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/record:Record":
return Record(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverDnsSecConfig:ResolverDnsSecConfig":
return ResolverDnsSecConfig(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverEndpoint:ResolverEndpoint":
return ResolverEndpoint(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverFirewallDomainList:ResolverFirewallDomainList":
return ResolverFirewallDomainList(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverFirewallRuleGroup:ResolverFirewallRuleGroup":
return ResolverFirewallRuleGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverQueryLogConfig:ResolverQueryLogConfig":
return ResolverQueryLogConfig(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverQueryLogConfigAssociation:ResolverQueryLogConfigAssociation":
return ResolverQueryLogConfigAssociation(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverRule:ResolverRule":
return ResolverRule(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/resolverRuleAssociation:ResolverRuleAssociation":
return ResolverRuleAssociation(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/vpcAssociationAuthorization:VpcAssociationAuthorization":
return VpcAssociationAuthorization(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/zone:Zone":
return Zone(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:route53/zoneAssociation:ZoneAssociation":
return ZoneAssociation(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("aws", "route53/delegationSet", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/healthCheck", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/hostedZoneDnsSec", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/keySigningKey", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/queryLog", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/record", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverDnsSecConfig", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverEndpoint", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverFirewallDomainList", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverFirewallRuleGroup", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverQueryLogConfig", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverQueryLogConfigAssociation", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverRule", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/resolverRuleAssociation", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/vpcAssociationAuthorization", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/zone", _module_instance)
pulumi.runtime.register_resource_module("aws", "route53/zoneAssociation", _module_instance)
_register_module()
|
py | 1a38cec36cd5b12a51d94965e62ad26f8682238a | from setuptools import setup
package_name = 'carebt_kb'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Andreas Steck',
maintainer_email='[email protected]',
description='A ROS2 Knowledge Base implementation.',
license='Apache License 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'carebt_kb = carebt_kb.carebt_kb:main'
],
},
)
|
py | 1a38d08856759e33f36784635ce36d42a8d31c98 | # %%
import pandas as pd
import numpy as np
# %%
# Data Preprocess
df=pd.read_csv("./dataset/google-play-store-apps/googleplaystore.csv")
for i in df:
print(df[i].value_counts())
df.replace("NaN",np.nan,inplace=True)
df.isnull().sum()
# %%
df.dropna(inplace=True)
# %%
out=pd.DataFrame(df,columns=["App","Category","Rating","Reviews","Size","Installs","Price","ContentRating"])
out.to_csv("preprocess.csv",index=None)
# %%
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Chinese characters do not render correctly in matplotlib by default; these two lines set a font that supports them
df=pd.read_csv("./dataset/google-play-store-apps/googleplaystore.csv")
df.drop_duplicates(subset='App', inplace=True)  # drop duplicate apps
df = df[df['AndroidVer'].notna()]  # drop missing values (a != np.nan comparison never matches)
df = df[df['AndroidVer'] != 'NaN']  # drop the literal string 'NaN'
df = df[df['Installs'] != 'Free']  # drop samples whose columns are clearly misaligned
df = df[df['Installs'] != 'Paid']  # drop samples whose columns are clearly misaligned
print('Number of apps in the dataset : ' , len(df))
df['Installs']= df['Installs'].apply(lambda x: x.replace('+', '') if '+' in str(x) else x)
df['Installs'] = df['Installs'].apply(lambda x: x.replace(',', '') if ',' in str(x) else x)
df['Installs']= df['Installs'].apply(lambda x: int(x))
df['Size'] = df['Size'].apply(lambda x: str(x).replace('Varies with device', 'NaN') if 'Varies with device' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: str(x).replace('M', '') if 'M' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: str(x).replace(',', '') if 'M' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: float(str(x).replace('k', '')) / 1000 if 'k' in str(x) else x)
df['Size'] = df['Size'].apply(lambda x: float(x))
df['Installs']=df['Installs'].apply(lambda x: float(x))
df['Price'] = df['Price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else str(x))
df['Price'] = df['Price'].apply(lambda x: float(x))
df['Reviews'] = df['Reviews'].apply(lambda x: int(x))
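# A quick sanity check (illustrative, assuming the cleaning above has run): the
# converted columns should now all have numeric dtypes.
print(df[['Installs', 'Size', 'Price', 'Reviews']].dtypes)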
# %%
plt.figure(figsize=(15,10))
g=sns.countplot(x="Category",data=df, palette = "Set1")
g.set_xticklabels(g.get_xticklabels(), rotation=90, ha="right")
plt.savefig('CountApps.png', dpi=1000)
plt.show()
# %%
x = df['Rating'].dropna()
y = df['Size'].dropna()
z = df['Installs'][df.Installs!=0].dropna()
p = df['Reviews'][df.Reviews!=0].dropna()
t = df['Type'].dropna()
price = df['Price']
p= sns.pairplot(pd.DataFrame(list(zip(x, y, np.log(z), np.log10(p), t, price)),
columns=['Rating','Size', 'Installs', 'Reviews', 'Type', 'Price']), hue='Type', palette="Set2")
plt.savefig('relation.png', dpi=300)
plt.show()
# %%
plt.figure(figsize=(10,10))
sns.boxplot(x="Type", y="Rating", hue="ContentRating", data=df, palette="PRGn")
plt.savefig('box.png', dpi=600)
plt.show()
# %%
subset_df= df[df.Category.isin(['GAME', 'FAMILY', 'PHOTOGRAPHY', 'MEDICAL', 'TOOLS', 'FINANCE','LIFESTYLE','BUSINESS'])]
sns.set_style('darkgrid')
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
p = sns.stripplot(x="Price", y="Category", data=subset_df, jitter=True, linewidth=1)
#title = ax.set_title('Price trend of apps by category', size=25)
plt.savefig('price_trend_by_category.png', dpi=300)
plt.show()
# %%
df[['Category', 'App']][df.Price > 200]
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
subset_df_price= subset_df[subset_df.Price<100]
p = sns.stripplot(x="Price", y="Category", data=subset_df_price, jitter=True, linewidth=1)
plt.savefig('Price.png', dpi=300)
plt.show()
# %%
|
py | 1a38d0db4d93d331a6ece51a0abc738abace5fa3 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ControlFlowTest(converter_testing.TestCase):
def assertTransformedResult(self, test_fn, inputs, expected):
if not isinstance(inputs, tuple):
inputs = (inputs,)
with self.converted(test_fn, control_flow, {},
constant_op.constant) as result:
with self.cached_session() as sess:
self.assertEqual(sess.run(result.test_fn(*inputs)), expected)
@test_util.run_deprecated_v1
def test_while_basic(self):
def test_fn(n):
i = 0
s = 0
while i < n:
s += i
i += 1
return s, i, n
self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))
@test_util.run_deprecated_v1
def test_while_nested(self):
def test_fn(n):
i = 0
j = 0
s = 0
while i < n:
while j < i:
j += 3
u = i + j # 'u' is not defined within the inner loop
s += u
i += 1
j = 0
return s, i, j, n
self.assertTransformedResult(test_fn, constant_op.constant(5),
(25, 5, 0, 5))
@test_util.run_deprecated_v1
def test_while_single_output(self):
def test_fn(n):
while n > 0:
n -= 1
return n
self.assertTransformedResult(test_fn, constant_op.constant(5), 0)
def test_while_variable_defined_in_body(self):
def bad_while_loop(n):
while n > 0:
n -= 1
s = n
return s
node, ctx = self.prepare(bad_while_loop, {})
with self.assertRaises(NameError):
control_flow.transform(node, ctx)
@test_util.run_deprecated_v1
def test_if_basic(self):
def test_fn(n):
a = 0
b = 0
if n > 0:
a = -n
else:
b = 2 * n
return a, b
self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))
self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))
@test_util.run_deprecated_v1
def test_if_complex_outputs(self):
class TestClass(object):
def __init__(self, a, b):
self.a = a
self.b = b
def test_fn(n, obj):
obj.a = 0
obj.b = 0
if n > 0:
obj.a = -n
else:
obj.b = 2 * n
return obj
with self.converted(test_fn, control_flow, {}) as result:
with self.cached_session() as sess:
res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))
self.assertEqual(sess.run((res_obj.a, res_obj.b)), (-1, 0))
res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))
self.assertEqual(sess.run((res_obj.a, res_obj.b)), (0, -2))
@test_util.run_deprecated_v1
def test_if_single_output(self):
def test_fn(n):
if n > 0:
n = -n
return n
self.assertTransformedResult(test_fn, constant_op.constant(1), -1)
@test_util.run_deprecated_v1
def test_if_semi(self):
def test_fn(n):
if n > 0:
n = 3
return n
self.assertTransformedResult(test_fn, constant_op.constant(2), 3)
self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)
@test_util.run_deprecated_v1
def test_if_local_var(self):
def test_fn(n):
if n > 0:
b = 4
n = b + 1
return n
self.assertTransformedResult(test_fn, constant_op.constant(1), 5)
self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
@test_util.run_deprecated_v1
def test_if_no_outputs(self):
def test_fn(n):
if n > 0:
b = 4 # pylint:disable=unused-variable
return n
# Without side effect guards, the if statement will stage a cond,
# but that will be pruned at execution.
self.assertTransformedResult(test_fn, constant_op.constant(1), 1)
self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
def test_if_imbalanced_outputs(self):
def test_fn(n):
if n > 0:
b = 4
return b
node, ctx = self.prepare(test_fn, {})
with self.assertRaises(transformer.AutoGraphParseError):
control_flow.transform(node, ctx)
@test_util.run_deprecated_v1
def test_simple_for(self):
def test_fn(l):
s1 = 0
s2 = 0
for e in l:
s1 += e
s2 += e * e
return s1, s2
self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))
empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
self.assertTransformedResult(test_fn, empty_vector, (0, 0))
@test_util.run_deprecated_v1
def test_for_single_output(self):
def test_fn(l):
s = 0
for e in l:
s += e
return s
self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)
empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
self.assertTransformedResult(test_fn, empty_vector, 0)
def test_for_iterated_expression(self):
eval_count = [0]
def count_evals(x):
eval_count[0] += 1
return x
def test_fn(n):
s = 0
for e in count_evals(range(n)):
s += e
return s
ns = {'count_evals': count_evals}
node, ctx = self.prepare(test_fn, ns)
node = control_flow.transform(node, ctx)
with self.compiled(node, ns) as result:
self.assertEqual(result.test_fn(5), 10)
self.assertEqual(eval_count[0], 1)
def test_for_variable_defined_in_body(self):
def bad_for_loop(n):
for i in range(n):
s = i
return s
node, ctx = self.prepare(bad_for_loop, {})
with self.assertRaises(NameError):
control_flow.transform(node, ctx)
@test_util.run_deprecated_v1
def test_for_tuple_unpacking(self):
def test_fn(x_list):
z = tf.constant(0) # pylint:disable=undefined-variable
for i, x in enumerate(x_list):
z = z + x + i
return z
self.assertTransformedResult(test_fn, [3, 3], 7)
if __name__ == '__main__':
test.main()
|
py | 1a38d180f7e51c2189102dfd2009ad63ebe139f3 | """
String Operators
"""
# Create two variables, each of which is half of a compound sentence.
## Example compound sentence: "I'll go to the beach today, and I'll go snorkeling." |
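# One possible answer (illustrative), using the example sentence above:
# first_half = "I'll go to the beach today,"
# second_half = " and I'll go snorkeling."
# compound_sentence = first_half + second_half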
py | 1a38d3e0fb52b917fbeb168f30d34e60ece0f878 | import scrapy
from scrapy.loader import ItemLoader
from itemloaders_example.items import QuoteItem
class QuotesWithItemLoaderSpider(scrapy.Spider):
name = "quotes-with-itemloader"
start_urls = [
'http://quotes.toscrape.com',
]
def parse(self, response):
for quote in response.css('div.quote'):
# check the items.QuoteItem class to see how we've defined
# the input and output processors for each one of these fields
il = ItemLoader(item=QuoteItem(), selector=quote)
il.add_css('text', 'span.text::text')
il.add_css('author_name', 'small.author::text')
il.add_css('tags', 'a.tag::text')
il.add_value('url', response.url)
yield il.load_item()
next_page = response.css("li.next > a::attr(href)").extract_first()
if next_page is not None:
url = response.urljoin(next_page)
yield scrapy.Request(url, callback=self.parse)
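# A minimal sketch of the processor declarations referenced in parse() above.
# The field names come from this spider; the exact processors on QuoteItem are
# an assumption, not necessarily the project's actual definitions:
#
# from itemloaders.processors import MapCompose, TakeFirst, Join
#
# class QuoteItem(scrapy.Item):
#     text = scrapy.Field(input_processor=MapCompose(str.strip), output_processor=TakeFirst())
#     author_name = scrapy.Field(output_processor=TakeFirst())
#     tags = scrapy.Field()  # left as a list of strings
#     url = scrapy.Field(output_processor=TakeFirst())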
|
py | 1a38d44fc050dba3825d0b47a872085e23ec3408 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from .cdm_attribute_context_type import CdmAttributeContextType
from .cdm_data_format import CdmDataFormat
from .cdm_object_type import CdmObjectType
from .cdm_relationship_discovery_style import CdmRelationshipDiscoveryStyle
from .cdm_status_level import CdmStatusLevel
from .cdm_validation_step import CdmValidationStep
__all__ = [
'CdmAttributeContextType',
'CdmDataFormat',
'CdmObjectType',
'CdmRelationshipDiscoveryStyle',
'CdmStatusLevel',
'CdmValidationStep'
]
|
py | 1a38d646452893000cd71f7519240aaeeddd467b | # Generated by Django 2.0.5 on 2018-08-17 21:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bCoreRiskApp', '0010_auto_20180817_2003'),
]
operations = [
migrations.RemoveField(
model_name='riskfield',
name='field_enum_text',
),
migrations.AddField(
model_name='riskfield',
name='field_enum',
field=models.IntegerField(blank=True, null=True),
),
]
|
py | 1a38d69461d3ccfe45b3ba7db3428be54d7f4891 | import random
from authorize import Customer
from authorize import Transaction
from authorize import AuthorizeResponseError
from datetime import date, timedelta
from nose.plugins.attrib import attr
from unittest import TestCase
FULL_CARD_NOT_PRESENT_TRANSACTION = {
'credit_card': {
'card_number': '4111111111111111',
'card_code': '523',
'expiration_month': '04',
'expiration_year': date.today().year + 1,
},
'email': '[email protected]',
'shipping': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
},
'billing': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
},
'tax': {
'amount': 45.00,
'name': 'Double Taxation Tax',
'description': 'Another tax for paying double tax',
},
'duty': {
'amount': 90.00,
'name': 'The amount for duty',
'description': 'I can''t believe you would pay for duty',
},
'line_items': [{
'item_id': 'CIR0001',
'name': 'Circuit Board',
'description': 'A brand new robot component',
'quantity': 5,
'unit_price': 99.99,
'taxable': 'true',
}, {
'item_id': 'CIR0002',
'name': 'Circuit Board 2.0',
'description': 'Another new robot component',
'quantity': 1,
'unit_price': 86.99,
'taxable': 'true',
}, {
'item_id': 'SCRDRVR',
'name': 'Screwdriver',
'description': 'A basic screwdriver',
'quantity': 1,
'unit_price': 10.00,
'taxable': 'true',
}],
'order': {
'invoice_number': 'INV0001',
'description': 'Just another invoice...',
},
'shipping_and_handling': {
'amount': 10.00,
'name': 'UPS 2-Day Shipping',
'description': 'Handle with care',
},
'extra_options': {
'customer_ip': '100.0.0.1',
},
'tax_exempt': False,
'recurring': True,
}
FULL_CARD_PRESENT_TRANSACTION = {
'track_data': {
'track_1': "%B4111111111111111^OTERON/ROB^{0:%y%m}101^?".format(date.today() + timedelta(days=365)),
'track_2': ";4111111111111111={0:%y%m}101?".format(date.today() + timedelta(days=365)),
},
'retail': {
'market_type': 2,
'device_type': 1,
},
'email': '[email protected]',
'shipping': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
},
'billing': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
},
'tax': {
'amount': 45.00,
'name': 'Double Taxation Tax',
'description': 'Another tax for paying double tax',
},
'duty': {
'amount': 90.00,
'name': 'The amount for duty',
'description': 'I can''t believe you would pay for duty',
},
'line_items': [{
'item_id': 'CIR0001',
'name': 'Circuit Board',
'description': 'A brand new robot component',
'quantity': 5,
'unit_price': 99.99,
'taxable': 'true',
}, {
'item_id': 'CIR0002',
'name': 'Circuit Board 2.0',
'description': 'Another new robot component',
'quantity': 1,
'unit_price': 86.99,
'taxable': 'true',
}, {
'item_id': 'SCRDRVR',
'name': 'Screwdriver',
'description': 'A basic screwdriver',
'quantity': 1,
'unit_price': 10.00,
'taxable': 'true',
}],
'order': {
'invoice_number': 'INV0001',
'description': 'Just another invoice...',
},
'shipping_and_handling': {
'amount': 10.00,
'name': 'UPS 2-Day Shipping',
'description': 'Handle with care',
},
'extra_options': {
'customer_ip': '100.0.0.1',
},
'tax_exempt': False,
'recurring': True,
}
CREDIT_CARD = {
'card_number': '4111111111111111',
'expiration_date': '04/{0}'.format(date.today().year + 1),
'card_code': '343',
}
FULL_CIM_TRANSACTION = {
'amount': 30.00,
'line_items': [{
'item_id': 'CIR0001',
'name': 'Circuit Board',
'description': 'A brand new robot component',
'quantity': 5,
'unit_price': 99.99,
'taxable': True,
}, {
'item_id': 'CIR0002',
'name': 'Circuit Board 2.0',
'description': 'Another new robot component',
'quantity': 1,
'unit_price': 86.99,
'taxable': True,
}, {
'item_id': 'SCRDRVR',
'name': 'Screwdriver',
'description': 'A basic screwdriver',
'quantity': 1,
'unit_price': 10.00,
'taxable': True,
}],
'order': {
'invoice_number': 'INV0001',
'description': 'Just another invoice...',
'order_number': 'PONUM00001',
},
'shipping_and_handling': {
'amount': 10.00,
'name': 'UPS 2-Day Shipping',
'description': 'Handle with care',
},
'tax': {
'amount': 45.00,
'name': 'Double Taxation Tax',
'description': 'Another tax for paying double tax',
},
'duty': {
'amount': 90.00,
'name': 'The amount for duty',
'description': 'I can''t believe you would pay for duty',
},
'extra_options': {
'customer_ip': 'fe80::f4b6:2a88:70fa:f09f',
},
'tax_exempt': False,
'recurring': True,
'card_code': '443',
}
FULL_ACCOUNT_TRANSACTION = {
'bank_account': {
'customer_type': 'individual',
'account_type': 'checking',
'routing_number': '322271627',
'account_number': '00987467838473',
'name_on_account': 'Rob Otron',
'bank_name': 'Evil Bank Co.',
'echeck_type': 'CCD',
},
'email': '[email protected]',
'shipping': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
},
'billing': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
},
'tax': {
'amount': 45.00,
'name': 'Double Taxation Tax',
'description': 'Another tax for paying double tax',
},
'duty': {
'amount': 90.00,
'name': 'The amount for duty',
'description': 'I can''t believe you would pay for duty',
},
'line_items': [{
'item_id': 'CIR0001',
'name': 'Circuit Board',
'description': 'A brand new robot component',
'quantity': 5,
'unit_price': 99.99,
'taxable': 'true',
}, {
'item_id': 'CIR0002',
'name': 'Circuit Board 2.0',
'description': 'Another new robot component',
'quantity': 1,
'unit_price': 86.99,
'taxable': 'true',
}, {
'item_id': 'SCRDRVR',
'name': 'Screwdriver',
'description': 'A basic screwdriver',
'quantity': 1,
'unit_price': 10.00,
'taxable': 'true',
}],
'order': {
'invoice_number': 'INV0001',
'description': 'Just another invoice...',
},
'shipping_and_handling': {
'amount': 10.00,
'name': 'UPS 2-Day Shipping',
'description': 'Handle with care',
},
'extra_options': {
'customer_ip': '100.0.0.1',
},
'tax_exempt': False,
'recurring': True,
}
BANK_ACCOUN_TRANSACTION = {
'bank_account': {
'routing_number': '322271627',
'account_number': '00987467838473',
'name_on_account': 'Rob Otron',
}
}
CUSTOMER = {
'credit_card': CREDIT_CARD
}
REFUND_TRANSACTION = {
'amount': 2222.00,
'transaction_id': '2197513033',
'last_four': '1111',
}
@attr('live_tests')
class TransactionTests(TestCase):
def test_live_cim_sale_transaction(self):
result = Customer.create(CUSTOMER)
transaction = FULL_CARD_NOT_PRESENT_TRANSACTION.copy()
transaction['customer_id'] = result.customer_id
transaction['payment_id'] = result.payment_ids[0]
# Create CIM sale transaction. If another sale is attempted too quickly,
# an error will be thrown.
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.sale(transaction)
self.assertRaises(AuthorizeResponseError, Transaction.sale, transaction)
# Read transaction details
Transaction.details(result.transaction_response.trans_id)
def test_live_card_not_present_aim_sale_transaction(self):
# Create AIM sale transaction
transaction = FULL_CARD_NOT_PRESENT_TRANSACTION.copy()
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.sale(transaction)
# Read transaction details
Transaction.details(result.transaction_response.trans_id)
def test_live_card_present_aim_sale_transaction(self):
# Create AIM sale transaction
transaction = FULL_CARD_PRESENT_TRANSACTION.copy()
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.sale(transaction)
# Read transaction details
Transaction.details(result.transaction_response.trans_id)
def test_live_cim_auth_transaction(self):
result = Customer.create(CUSTOMER)
transaction = FULL_CIM_TRANSACTION.copy()
transaction['customer_id'] = result.customer_id
transaction['payment_id'] = result.payment_ids[0]
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.auth(transaction)
# Read transaction details
result = Transaction.details(result.transaction_response.trans_id)
self.assertEqual(result.transaction.order.order_number, 'PONUM00001')
def test_auth_and_settle_card_not_present_transaction(self):
transaction = FULL_CARD_NOT_PRESENT_TRANSACTION.copy()
amount = random.randrange(100, 100000) / 100.0
transaction['amount'] = amount
result = Transaction.auth(transaction)
Transaction.settle(result.transaction_response.trans_id)
transaction_details = Transaction.details(result.transaction_response.trans_id)
self.assertEqual(transaction_details.transaction.auth_amount, "%.2f" % amount)
self.assertEqual(transaction_details.transaction.settle_amount, "%.2f" % amount)
def test_auth_and_settle_card_not_present_transaction_with_amount(self):
transaction = FULL_CARD_NOT_PRESENT_TRANSACTION.copy()
amount = random.randrange(100, 100000) / 100.0
settle_amount = amount - 0.9
transaction['amount'] = amount
result = Transaction.auth(transaction)
Transaction.settle(result.transaction_response.trans_id, settle_amount)
transaction_details = Transaction.details(result.transaction_response.trans_id)
self.assertEqual(transaction_details.transaction.auth_amount, '%.2f' % amount)
self.assertEqual(transaction_details.transaction.settle_amount, '%.2f' % settle_amount)
def test_auth_and_settle_card_present_transaction(self):
transaction = FULL_CARD_PRESENT_TRANSACTION.copy()
amount = random.randrange(100, 100000) / 100.0
transaction['amount'] = amount
result = Transaction.auth(transaction)
Transaction.settle(result.transaction_response.trans_id, amount)
transaction_details = Transaction.details(result.transaction_response.trans_id)
self.assertEqual(transaction_details.transaction.auth_amount, '%.2f' % amount)
self.assertEqual(transaction_details.transaction.settle_amount, '%.2f' % amount)
def test_auth_and_settle_card_present_transaction_with_amount(self):
transaction = FULL_CARD_PRESENT_TRANSACTION.copy()
amount = random.randrange(100, 100000) / 100.0
settle_amount = amount - 0.9
transaction['amount'] = amount
result = Transaction.auth(transaction)
Transaction.settle(result.transaction_response.trans_id, settle_amount)
transaction_details = Transaction.details(result.transaction_response.trans_id)
self.assertEqual(transaction_details.transaction.auth_amount, "%.2f" % amount)
self.assertEqual(transaction_details.transaction.settle_amount, "%.2f" % settle_amount)
def test_credit(self):
result = Customer.create(CUSTOMER)
credit = {
'amount': 40.00
}
credit['customer_id'] = result.customer_id
credit['payment_id'] = result.payment_ids[0]
Transaction.credit(credit)
def test_refund_transaction(self):
# Refunds will only work with settled transactions. We don't have a
# settled transaction and so will check the exception that's thrown
transaction = FULL_CARD_NOT_PRESENT_TRANSACTION.copy()
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.auth(transaction)
self.assertRaises(AuthorizeResponseError, Transaction.refund, REFUND_TRANSACTION)
def test_void_transaction(self):
transaction = FULL_CARD_NOT_PRESENT_TRANSACTION.copy()
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.sale(transaction)
Transaction.void(result.transaction_response.trans_id)
def test_transaction_details(self):
transaction = FULL_CARD_NOT_PRESENT_TRANSACTION.copy()
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.sale(transaction)
Transaction.details(result.transaction_response.trans_id)
def test_transaction_response_error_handling(self):
# Issue 21: Handle transaction response errors, which are different from
# transaction errors. By running a bank account transaction over
# $200, we can replicate this strange processing behavior.
transaction = BANK_ACCOUN_TRANSACTION.copy()
transaction['amount'] = random.randrange(2001, 100000) / 100.0
self.assertRaises(AuthorizeResponseError, Transaction.sale, transaction)
def test_list_unsettled_transactions(self):
Transaction.list()
def test_list_transactions_by_batch(self):
self.assertRaises(AuthorizeResponseError, Transaction.list, 'Bad batch ID')
|
py | 1a38d802ef5ddb1d8632a4b48e85baebadf88703 | # @file LibraryClassCheck.py
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import logging
import os
from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin
from edk2toollib.uefi.edk2.parsers.dec_parser import DecParser
from edk2toollib.uefi.edk2.parsers.inf_parser import InfParser
from edk2toolext.environment.var_dict import VarDict
class LibraryClassCheck(ICiBuildPlugin):
"""
A CiBuildPlugin that scans the code tree and library classes for undeclared
files
Configuration options:
"LibraryClassCheck": {
IgnoreHeaderFile: [], # Ignore a file found on disk
IgnoreLibraryClass: [] # Ignore a declaration found in dec file
}
"""
def GetTestName(self, packagename: str, environment: VarDict) -> tuple:
""" Provide the testcase name and classname for use in reporting
testclassname: a descriptive string for the testcase can include whitespace
classname: should be patterned <packagename>.<plugin>.<optionally any unique condition>
Args:
packagename: string containing name of package to build
environment: The VarDict for the test to run in
Returns:
a tuple containing the testcase name and the classname
(testcasename, classname)
"""
return ("Check library class declarations in " + packagename, packagename + ".LibraryClassCheck")
def __GetPkgDec(self, rootpath):
try:
allEntries = os.listdir(rootpath)
for entry in allEntries:
if entry.lower().endswith(".dec"):
return(os.path.join(rootpath, entry))
except Exception:
logging.error("Unable to find DEC for package:{0}".format(rootpath))
return None
##
# External function of plugin. This function is used to perform the task of the MuBuild Plugin
#
# - package is the edk2 path to package. This means workspace/packagepath relative.
# - edk2path object configured with workspace and packages path
# - PkgConfig Object (dict) for the pkg
# - EnvConfig Object
# - Plugin Manager Instance
# - Plugin Helper Obj Instance
# - Junit Logger
# - output_stream the StringIO output stream from this plugin via logging
def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None):
overall_status = 0
LibraryClassIgnore = []
abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename)
abs_dec_path = self.__GetPkgDec(abs_pkg_path)
wsr_dec_path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(abs_dec_path)
if abs_dec_path is None or wsr_dec_path == "" or not os.path.isfile(abs_dec_path):
tc.SetSkipped()
tc.LogStdError("No DEC file {0} in package {1}".format(abs_dec_path, abs_pkg_path))
return -1
# Get all include folders
dec = DecParser()
dec.SetBaseAbsPath(Edk2pathObj.WorkspacePath).SetPackagePaths(Edk2pathObj.PackagePathList)
dec.ParseFile(wsr_dec_path)
AllHeaderFiles = []
for includepath in dec.IncludePaths:
## Get all header files in the library folder
AbsLibraryIncludePath = os.path.join(abs_pkg_path, includepath, "Library")
if(not os.path.isdir(AbsLibraryIncludePath)):
continue
hfiles = self.WalkDirectoryForExtension([".h"], AbsLibraryIncludePath)
hfiles = [os.path.relpath(x,abs_pkg_path) for x in hfiles] # make package root relative path
hfiles = [x.replace("\\", "/") for x in hfiles] # make package relative path
AllHeaderFiles.extend(hfiles)
if len(AllHeaderFiles) == 0:
tc.SetSkipped()
tc.LogStdError(f"No Library include folder in any Include path")
return -1
# Remove ignored paths
if "IgnoreHeaderFile" in pkgconfig:
for a in pkgconfig["IgnoreHeaderFile"]:
try:
tc.LogStdOut("Ignoring Library Header File {0}".format(a))
AllHeaderFiles.remove(a)
except:
tc.LogStdError("LibraryClassCheck.IgnoreHeaderFile -> {0} not found. Invalid Header File".format(a))
logging.info("LibraryClassCheck.IgnoreHeaderFile -> {0} not found. Invalid Header File".format(a))
if "IgnoreLibraryClass" in pkgconfig:
LibraryClassIgnore = pkgconfig["IgnoreLibraryClass"]
## Attempt to find library classes
for lcd in dec.LibraryClasses:
## Check for correct file path separator
if "\\" in lcd.path:
tc.LogStdError("LibraryClassCheck.DecFilePathSeparator -> {0} invalid.".format(lcd.path))
logging.error("LibraryClassCheck.DecFilePathSeparator -> {0} invalid.".format(lcd.path))
overall_status += 1
continue
if lcd.name in LibraryClassIgnore:
tc.LogStdOut("Ignoring Library Class Name {0}".format(lcd.name))
LibraryClassIgnore.remove(lcd.name)
continue
logging.debug(f"Looking for Library Class {lcd.path}")
try:
AllHeaderFiles.remove(lcd.path)
except ValueError:
tc.LogStdError(f"Library {lcd.name} with path {lcd.path} not found in package filesystem")
logging.error(f"Library {lcd.name} with path {lcd.path} not found in package filesystem")
overall_status += 1
## any remaining AllHeaderFiles are not described in DEC
for h in AllHeaderFiles:
tc.LogStdError(f"Library Header File {h} not declared in package DEC {wsr_dec_path}")
logging.error(f"Library Header File {h} not declared in package DEC {wsr_dec_path}")
overall_status += 1
## Warn about any invalid library class names in the ignore list
for r in LibraryClassIgnore:
tc.LogStdError("LibraryClassCheck.IgnoreLibraryClass -> {0} not found. Library Class not found".format(r))
logging.info("LibraryClassCheck.IgnoreLibraryClass -> {0} not found. Library Class not found".format(r))
# If XML object exists, add result
if overall_status != 0:
tc.SetFailed("LibraryClassCheck {0} Failed. Errors {1}".format(wsr_dec_path, overall_status), "CHECK_FAILED")
else:
tc.SetSuccess()
return overall_status
|
py | 1a38d80bbfeee866c06ae0c647b722c014682665 | #!/usr/bin/env python
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates script-specific samples (collections of chars) using cldr
exemplar data for languages written in a script."""
import argparse
import codecs
import collections
import locale
import os
from os import path
import re
import shutil
import xml.etree.cElementTree as ElementTree
from nototools import cldr_data
from nototools import create_image
from nototools import extra_locale_data
from nototools import notoconfig
from nototools import tool_utils
from nototools import unicode_data
try:
from icu import Locale, Collator
print 'will use icu locale-specific order'
_HAVE_ICU = True
except ImportError as e:
print 'will use default locale sort order'
_HAVE_ICU = False
NOTO_TOOLS = path.abspath(path.join(path.dirname(__file__), os.pardir))
CLDR_DIR = path.join(NOTO_TOOLS, 'third_party', 'cldr')
_VERBOSE = False
def get_script_to_exemplar_data_map():
"""Return a map from script to 3-tuples of:
- locale tuple (lang, script, region, variant)
- cldr_relative path to src of exemplar data
- tuple of the exemplar chars"""
script_map = collections.defaultdict(dict)
for directory in ['common', 'seed', 'exemplars']:
data_dir = path.join(directory, 'main')
for filename in os.listdir(path.join(CLDR_DIR, data_dir)):
if not filename.endswith('.xml'):
continue
exemplar_list = cldr_data.get_exemplar_from_file(path.join(data_dir, filename))
if not exemplar_list:
if _VERBOSE:
print ' no exemplar list for %s' % path.join(data_dir, filename)
continue
lsrv = cldr_data.loc_tag_to_lsrv(filename[:-4])
if not lsrv:
if _VERBOSE:
print ' no lsrv for %s' % path.join(data_dir, filename)
continue
src = path.join(directory, filename)
script = lsrv[1]
if not script:
if _VERBOSE:
print ' no script for %s' % path.join(data_dir, filename)
continue
loc_tag = cldr_data.lsrv_to_loc_tag(lsrv)
loc_to_exemplar_info = script_map[script]
if loc_tag in loc_to_exemplar_info:
if _VERBOSE:
print 'skipping %s, already have exemplars for %s from %s' % (
src, loc_tag, loc_to_exemplar_info[loc_tag][1])
continue
# fix exemplars that look incorrect
if script == 'Arab' and 'd' in exemplar_list:
if _VERBOSE:
print 'found \'d\' in %s for %s' % (src, lsrv)
no_latin = True
else:
no_latin = False
# exclude exemplar strings, and restrict to letters and digits
def accept_cp(cp):
if len(cp) != 1:
return False
cat = unicode_data.category(cp)
if cat[0] != 'L' and cat != 'Nd':
return False
if no_latin and cp in 'df':
return False
return True
filtered_exemplar_list = filter(accept_cp, exemplar_list)
# some exemplar lists don't surround strings with curly braces, and end up
# with duplicate characters. Flag these
exemplar_chars = set()
dup_chars = set()
fixed_exemplar_list = []
for cp in filtered_exemplar_list:
if cp in exemplar_chars:
dup_chars.add(cp)
else:
exemplar_chars.add(cp)
fixed_exemplar_list.append(cp)
if len(dup_chars) > 0 and _VERBOSE:
print 'duplicate exemplars in %s: %s' % (
src, ', '.join([u'\u200e%s\u200e (%x)' % (cp, ord(cp)) for cp in dup_chars]))
loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(fixed_exemplar_list))
# supplement with extra locale data
for loc_tag in extra_locale_data.EXEMPLARS:
exemplar_list = cldr_data.get_exemplar_from_extra_data(loc_tag)
lang, script = loc_tag.split('-')
lsrv = (lang, script, None, None)
loc_to_exemplar_info = script_map[script]
src = '[extra locale data]/%s' % loc_tag
if loc_tag in loc_to_exemplar_info:
if _VERBOSE:
print 'skipping %s, already have exemplars for %s from %s' % (
src, loc_tag, loc_to_exemplar_info[loc_tag][1])
continue
# restrict to letters, except for zsym
def accept_cp(cp):
cat = unicode_data.category(cp)
return cat[0] == 'L' or cat == 'Nd'
if 'Zsym' not in loc_tag:
filtered_exemplar_list = filter(accept_cp, exemplar_list)
if len(filtered_exemplar_list) != len(exemplar_list) and _VERBOSE:
print 'filtered some characters from %s' % src
else:
filtered_exemplar_list = exemplar_list
loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(filtered_exemplar_list))
return script_map
def show_rarely_used_char_info(script, loc_map, char_to_lang_map):
# let's list chars unique to each language
for loc_tag in sorted(loc_map):
unique_chars = []
dual_chars = []
dual_shared_with = set()
triple_chars = []
triple_shared_with = set()
info = loc_map[loc_tag]
exemplars = info[2]
for cp in exemplars:
num_common_langs = len(char_to_lang_map[cp])
if num_common_langs == 1:
unique_chars.append(cp)
elif num_common_langs == 2:
dual_chars.append(cp)
for shared_loc_tag in char_to_lang_map[cp]:
if shared_loc_tag != loc_tag:
dual_shared_with.add(shared_loc_tag)
elif num_common_langs == 3:
triple_chars.append(cp)
for shared_loc_tag in char_to_lang_map[cp]:
if shared_loc_tag != loc_tag:
triple_shared_with.add(shared_loc_tag)
script_tag = '-' + script
if unique_chars:
print '%s has %d unique chars: %s%s' % (
loc_tag, len(unique_chars), ' '.join(unique_chars[:100]),
'...' if len(unique_chars) > 100 else '')
if dual_chars:
print '%s shares %d chars (%s%s) with 1 other lang: %s' % (
loc_tag, len(dual_chars), ' '.join(dual_chars[:20]),
'...' if len(dual_chars) > 20 else '',
', '.join(sorted([loc.replace(script_tag, '') for loc in dual_shared_with])))
if triple_chars:
print '%s shares %d chars (%s%s) with 2 other langs: %s' % (
loc_tag, len(triple_chars), ' '.join(triple_chars[:20]),
'...' if len(triple_chars) > 20 else '',
', '.join(sorted([loc.replace(script_tag, '') for loc in triple_shared_with])))
if not (unique_chars or dual_chars or triple_chars):
print '%s shares all chars with 3+ other langs' % loc_tag
def get_char_to_lang_map(loc_map):
char_to_lang_map = collections.defaultdict(list)
for loc_tag in sorted(loc_map):
info = loc_map[loc_tag]
exemplars = info[2]
for cp in exemplars:
if loc_tag in char_to_lang_map[cp]:
print 'loc %s (from %s) already in char_to_lang_map for %s (%x)' % (
loc_tag, info[1], cp, ord(cp))
else:
char_to_lang_map[cp].append(loc_tag)
return char_to_lang_map
def char_lang_info(num_locales, char_to_lang_map):
"""Returns a tuple containing
- characters ordered by the number of langs that use them
- a list mapping number of shared langs to number of chars shared by those langs"""
freq_list = []
hist = [0] * (num_locales + 1)
for cp in char_to_lang_map:
num_shared_langs = len(char_to_lang_map[cp])
if num_shared_langs >= len(hist):
for shared_lang in char_to_lang_map[cp]:
if shared_lang not in loc_map:
print 'loc map does not have \'%s\'!' % shared_lang
freq_list.append((num_shared_langs, cp))
if num_shared_langs >= len(hist):
print 'num shared langs is %d but size of hist is %d' % (num_shared_langs, len(hist))
hist[num_shared_langs] += 1
freq_list.sort()
return [cp for nl, cp in freq_list], hist
def show_char_use_info(script, chars_by_num_langs, char_to_lang_map):
script_tag = '-' + script
for cp in chars_by_num_langs:
langs = char_to_lang_map[cp]
count = len(langs)
limit = 12
without_script = [loc.replace(script_tag, '') for loc in langs[:limit]]
without_script_str = ', '.join(sorted(without_script))
if count > limit:
without_script_str += '...'
print u'char %s\u200e (%x): %d %s' % (cp, ord(cp), count, without_script_str)
print 'total chars listed: %d' % len(char_to_lang_map)
def show_shared_langs_hist(hist):
# histogram - number of chars per number of shared languages
for i in range(1, len(hist)):
print '[%3d] %3d %s' % (i, hist[i], 'x' * hist[i])
def get_upper_case_list(char_list):
"""Return the upper case versions where they differ.
If no char in the list is a lower case variant, the result is empty."""
# keep in same order as input list.
upper_case_chars = []
for cp in char_list:
upcp = unicode_data.to_upper(cp)
if upcp != cp:
upper_case_chars.append(upcp)
return upper_case_chars
def show_tiers(char_list, num_tiers, tier_size):
for tier in range(1, num_tiers + 1):
if tier == 1:
subset = char_list[-tier_size:]
else:
subset = char_list[tier * -tier_size:(tier-1) * -tier_size]
if not subset:
break
tier_chars = sorted(subset)
print 'tier %d: %s' % (tier, ' '.join(tier_chars))
upper_case_chars = get_upper_case_list(tier_chars)
if upper_case_chars:
print ' upper: ' + ' '.join(upper_case_chars)
def get_rare_char_info(char_to_lang_map, shared_lang_threshold):
"""Returns a tuple of:
- a set of 'rare_chars' (those used threshold langs or fewer),
- a mapping from each locale with rare chars to a set of its rare chars"""
rare_chars = set()
locs_with_rare_chars = collections.defaultdict(set)
for cp in char_to_lang_map:
num_shared_langs = len(char_to_lang_map[cp])
if num_shared_langs <= shared_lang_threshold:
rare_chars.add(cp)
for lang_tag in char_to_lang_map[cp]:
locs_with_rare_chars[lang_tag].add(cp)
return rare_chars, locs_with_rare_chars
_lang_for_script_map = {}
def _init_lang_for_script_map():
locs_by_lit_pop = [loc for _, loc in cldr_data.get_lang_scrs_by_decreasing_global_lit_pop()]
for t in locs_by_lit_pop:
lsrv = cldr_data.loc_tag_to_lsrv(t)
script = lsrv[1]
if script not in _lang_for_script_map:
lang = lsrv[0]
# print '%s lang => %s' % (script, lang)
_lang_for_script_map[script] = lang
def lang_for_script(script):
"""Return the most common language for a script based on literate population."""
# should use likely subtag data for this.
# the current code assumes all we want is lang -> script, I'd have to change
# it to map locale->locale. Right now I dont' get Hant -> zh_Hant, only
# Hant -> zh, which isn't good enough I think.
if not _lang_for_script_map:
_init_lang_for_script_map()
return _lang_for_script_map.get(script)
def select_rare_chars_for_loc(script, locs_with_rare_chars, shared_lang_threshold,
char_to_lang_map):
"""Return a list of 2-tuples of loc and selected rare chars,
ordered by decreasing literate population of the locale."""
rarity_threshold_map = {}
for lang_tag in locs_with_rare_chars:
rarity_threshold_map[lang_tag] = shared_lang_threshold
selected = []
locs_by_lit_pop = [loc for _, loc in cldr_data.get_lang_scrs_by_decreasing_global_lit_pop()]
# examine locales in decreasing order of literate population
for loc_tag in locs_by_lit_pop:
if script not in loc_tag:
continue
loc_tag = loc_tag.replace('_', '-')
if loc_tag not in locs_with_rare_chars:
continue
most_specific_chars = set()
most_specific_chars_count = rarity_threshold_map[loc_tag]
# From the rare chars for this locale, select those that
# are most specific to this language. In most cases they
# are unique to this language.
for cp in locs_with_rare_chars[loc_tag]:
num_chars = len(char_to_lang_map[cp])
if num_chars <= most_specific_chars_count:
if num_chars < most_specific_chars_count:
most_specific_chars = set()
most_specific_chars.add(cp)
most_specific_chars_count = num_chars
if most_specific_chars:
selected.append((loc_tag, most_specific_chars))
for cp in most_specific_chars:
for tag in char_to_lang_map[cp]:
if rarity_threshold_map[tag] > most_specific_chars_count:
rarity_threshold_map[tag] = most_specific_chars_count
return selected
def show_selected_rare_chars(selected):
print 'langs with rare chars by lang pop:'
for lang_tag, chars in selected:
print '%10s: %s' % (lang_tag, ', '.join(sorted(chars)))
def sort_for_script(cp_list, script):
lang = lang_for_script(script)
if not lang:
print 'cannot sort for script, no lang for %s' % script
return cp_list
if _HAVE_ICU:
from icu import Locale, Collator
loc = Locale(lang + '_' + script)
col = Collator.createInstance(loc)
return sorted(cp_list, cmp=col.compare)
else:
import locale
return sorted(cp_list, cmp=locale.strcoll)
def addcase(sample, script):
cased_sample = []
for cp in sample:
ucp = unicode_data.to_upper(cp)
if ucp != cp and ucp not in sample: # Copt has cased chars paired in the block
cased_sample.append(ucp)
if cased_sample:
cased_sample = ' '.join(cased_sample)
if _VERBOSE:
print 'add case for %s' % script
return sample + '\n' + cased_sample
return sample
def _generate_excluded_characters():
# Some of these exclusions are desired, and some are reluctantly applied because
# Noto currently does not support some characters. We use the generated
# data as fallback samples on a per-script and not per-font basis, which is also
# a problem.
# Religious characters
# deva OM, Arabic pbuh, bismillah
codepoints = [0x950, 0xfdfa, 0xfdfd]
# Cyrillic characters not in sans or serif
codepoints.append(0x2e2f)
for cp in range(0xa640, 0xa680):
codepoints.append(cp)
# Arabic character not in kufi
codepoints.append(0x08a0)
chars = set()
for cp in codepoints:
chars.add(unichr(cp))
return frozenset(chars)
_EXCLUDE_CHARS = _generate_excluded_characters()
def generate_sample_for_script(script, loc_map):
num_locales = len(loc_map)
if num_locales == 1:
tag, info = loc_map.iteritems().next()
exemplars = info[2]
ex_len = len(exemplars)
info = '%s (1 locale)\nfrom exemplars for %s (%s%d chars)' % (
script, tag, 'first 60 of ' if ex_len > 60 else '', ex_len)
# don't sort, rely on exemplar order
sample = ' '.join(exemplars[:60])
sample = addcase(sample, script)
return sample, info
script_tag = '-' + script
char_to_lang_map = get_char_to_lang_map(loc_map)
if len(char_to_lang_map) <= 60:
info = '%s (%d locales)\nfrom merged exemplars (%d chars) from %s' % (
script, num_locales, len(char_to_lang_map),
', '.join([loc.replace(script_tag, '') for loc in loc_map]))
sample = ' '.join(sort_for_script(list(char_to_lang_map), script))
sample = addcase(sample, script)
return sample, info
# show_rarely_used_char_info(script, loc_map, char_to_lang_map)
chars_by_num_langs, num_langs_to_num_chars = char_lang_info(
num_locales, char_to_lang_map)
# show_char_use_info(chars_by_num_langs, char_to_lang_map)
# show_shared_langs_hist(num_langs_to_num_chars)
# show_tiers(chars_by_num_langs, 3, 40)
shared_lang_threshold = min(7, num_locales)
rare_chars, locs_with_rare_chars = get_rare_char_info(
char_to_lang_map, shared_lang_threshold)
selected = select_rare_chars_for_loc(script,
locs_with_rare_chars, shared_lang_threshold, char_to_lang_map)
# show_selected_rare_chars(selected)
chars_by_num_langs = [cp for cp in chars_by_num_langs if cp not in _EXCLUDE_CHARS]
chosen_chars = list(chars_by_num_langs)[-60:]
rare_extension = []
for _, chars in selected:
avail_chars = [cp for cp in chars if cp not in chosen_chars and
cp not in rare_extension and cp not in _EXCLUDE_CHARS]
rare_extension.extend(sorted(avail_chars)[:4]) # vietnamese dominates latin otherwise
if len(rare_extension) > 20:
break
chosen_chars = chosen_chars[:60 - len(rare_extension)]
chosen_chars.extend(rare_extension)
info = ('%s (%d locales)\n'
'from most common exemplars plus chars specific to most-read languages' % (
script, num_locales))
sample = ' '.join(sort_for_script(chosen_chars, script))
sample = addcase(sample, script)
return sample, info
def generate_samples(dstdir, imgdir, summary):
if imgdir:
imgdir = tool_utils.ensure_dir_exists(imgdir)
print 'writing images to %s' % imgdir
if dstdir:
dstdir = tool_utils.ensure_dir_exists(dstdir)
print 'writing files to %s' % dstdir
verbose = summary
script_map = get_script_to_exemplar_data_map()
for script in sorted(script_map):
sample, info = generate_sample_for_script(script, script_map[script])
if summary:
print
print info
print sample
if imgdir:
path = os.path.join(imgdir, 'und-%s_chars.png' % script)
print 'writing image %s.png' % script
rtl = script in ['Adlm', 'Arab', 'Hebr', 'Nkoo', 'Syrc', 'Tfng', 'Thaa']
create_image.create_png(
sample, path, font_size=34, line_spacing=40, width=800, rtl=rtl)
if dstdir:
filename = 'und-%s_chars.txt' % script
print 'writing data %s' % filename
filepath = os.path.join(dstdir, filename)
with codecs.open(filepath, 'w', 'utf-8') as f:
f.write(sample + '\n')
def main():
default_dstdir = os.path.join(NOTO_TOOLS, 'sample_texts')
parser = argparse.ArgumentParser()
parser.add_argument('--dstdir', help='where to write samples (default %s)' %
default_dstdir, default=default_dstdir, metavar='dir')
parser.add_argument('--imgdir', help='if defined, generate images in this dir',
metavar='dir')
parser.add_argument('--save', help='write sample files in dstdir', action='store_true')
parser.add_argument('--summary', help='output list of samples and how they were generated',
action='store_true')
parser.add_argument('--verbose', help='print warnings and extra info', action='store_true')
args = parser.parse_args()
if not args.save and not args.imgdir and not args.summary:
print 'nothing to do.'
return
if args.verbose:
global _VERBOSE
_VERBOSE = True
generate_samples(args.dstdir if args.save else None, args.imgdir, args.summary)
if __name__ == '__main__':
locale.setlocale(locale.LC_COLLATE, 'en_US.UTF-8')
main()
|
py | 1a38d821641e7d534bfe107ec1e5a5ef4e07ada4 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-05-07 16:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blablaCar', '0021_auto_20200416_2303'),
]
operations = [
migrations.AlterModelOptions(
name='reservation',
options={'permissions': [('can_see_trajet_booked', 'can_delete_trajet_booked')]},
),
]
|
py | 1a38d8b1266892c3dd90d6b38a32d06a9516bdb5 | """
I/O for DOLFIN's XML format, cf.
<https://people.sc.fsu.edu/~jburkardt/data/dolfin_xml/dolfin_xml.html>.
"""
import logging
import os
import pathlib
import re
from xml.etree import ElementTree as ET
import numpy
from .._exceptions import ReadError, WriteError
from .._helpers import register
from .._mesh import Mesh
def _read_mesh(filename):
dolfin_to_meshio_type = {"triangle": ("triangle", 3), "tetrahedron": ("tetra", 4)}
# Use iterparse() to avoid loading the entire file via parse(). iterparse()
# allows to discard elements (via clear()) after they have been processed.
# See <https://stackoverflow.com/a/326541/353337>.
dim = None
points = None
keys = None
cell_type = None
num_nodes_per_cell = None
for event, elem in ET.iterparse(filename, events=("start", "end")):
if event == "end":
continue
if elem.tag == "dolfin":
# Don't be too strict with the assertion. Some mesh files don't have the
# proper tags.
# assert elem.attrib['nsmap'] \
# == '{\'dolfin\': \'https://fenicsproject.org/\'}'
pass
elif elem.tag == "mesh":
dim = int(elem.attrib["dim"])
cell_type, num_nodes_per_cell = dolfin_to_meshio_type[
elem.attrib["celltype"]
]
cell_tags = [f"v{i}" for i in range(num_nodes_per_cell)]
elif elem.tag == "vertices":
if dim is None:
raise ReadError("Expected `mesh` before `vertices`")
points = numpy.empty((int(elem.attrib["size"]), dim))
keys = ["x", "y"]
if dim == 3:
keys += ["z"]
elif elem.tag == "vertex":
if points is None or keys is None:
raise ReadError("Expected `vertices` before `vertex`")
k = int(elem.attrib["index"])
points[k] = [elem.attrib[key] for key in keys]
elif elem.tag == "cells":
if cell_type is None or num_nodes_per_cell is None:
raise ReadError("Expected `mesh` before `cells`")
cells = [
(
cell_type,
numpy.empty(
(int(elem.attrib["size"]), num_nodes_per_cell), dtype=int
),
)
]
elif elem.tag in ["triangle", "tetrahedron"]:
k = int(elem.attrib["index"])
cells[0][1][k] = [elem.attrib[t] for t in cell_tags]
else:
logging.warning("Unknown entry %s. Ignoring.", elem.tag)
elem.clear()
return points, cells, cell_type
def _read_cell_data(filename):
dolfin_type_to_numpy_type = {
"int": numpy.dtype("int"),
"float": numpy.dtype("float"),
"uint": numpy.dtype("uint"),
}
cell_data = {}
dir_name = pathlib.Path(filename).resolve().parent
# Loop over all files in the same directory as `filename`.
basename = pathlib.Path(filename).stem
for f in os.listdir(dir_name):
# Check if there are files by the name "<filename>_*.xml"; if yes,
# extract the * pattern and make it the name of the data set.
out = re.match(f"{basename}_([^\\.]+)\\.xml", f)
if not out:
continue
name = out.group(1)
parser = ET.XMLParser()
tree = ET.parse((dir_name / f).as_posix(), parser)
root = tree.getroot()
mesh_functions = list(root)
if len(mesh_functions) != 1:
raise ReadError("Can only handle one mesh function")
mesh_function = mesh_functions[0]
if mesh_function.tag != "mesh_function":
raise ReadError()
size = int(mesh_function.attrib["size"])
dtype = dolfin_type_to_numpy_type[mesh_function.attrib["type"]]
data = numpy.empty(size, dtype=dtype)
for child in mesh_function:
if child.tag != "entity":
raise ReadError()
idx = int(child.attrib["index"])
data[idx] = child.attrib["value"]
if name not in cell_data:
cell_data[name] = []
cell_data[name].append(data)
return cell_data
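# For illustration (file names are hypothetical): reading "mesh.xml" with a
# sibling "mesh_materials.xml" makes the loop above return
# cell_data == {"materials": [<numpy array>]}, i.e. the suffix between the base
# name and ".xml" becomes the name of the cell data set.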
def read(filename):
points, cells, _ = _read_mesh(filename)
cell_data = _read_cell_data(filename)
return Mesh(points, cells, cell_data=cell_data)
def _write_mesh(filename, points, cell_type, cells):
stripped_cells = [c for c in cells if c.type == cell_type]
meshio_to_dolfin_type = {"triangle": "triangle", "tetra": "tetrahedron"}
if any(c.type != cell_type for c in cells):
discarded_cell_types = {c.type for c in cells if c.type != cell_type}
logging.warning(
"DOLFIN XML can only handle one cell type at a time. "
"Using %s, discarding %s.",
cell_type,
", ".join(discarded_cell_types),
)
dim = points.shape[1]
if dim not in [2, 3]:
raise WriteError(f"Can only write dimension 2, 3, got {dim}.")
coord_names = ["x", "y"]
if dim == 3:
coord_names += ["z"]
with open(filename, "w") as f:
f.write("<dolfin nsmap=\"{'dolfin': 'https://fenicsproject.org/'}\">\n")
ct = meshio_to_dolfin_type[cell_type]
f.write(f' <mesh celltype="{ct}" dim="{dim}">\n')
num_points = len(points)
f.write(f' <vertices size="{num_points}">\n')
for idx, point in enumerate(points):
s = " ".join(f'{xyz}="{p}"' for xyz, p in zip("xyz", point))
f.write(f' <vertex index="{idx}" {s} />\n')
f.write(" </vertices>\n")
num_cells = 0
for c in stripped_cells:
num_cells += len(c.data)
f.write(f' <cells size="{num_cells}">\n')
idx = 0
for ct, cls in stripped_cells:
type_string = meshio_to_dolfin_type[ct]
for cell in cls:
s = " ".join(f'v{k}="{c}"' for k, c in enumerate(cell))
f.write(f' <{type_string} index="{idx}" {s} />\n')
idx += 1
f.write(" </cells>\n")
f.write(" </mesh>\n")
f.write("</dolfin>")
def _numpy_type_to_dolfin_type(dtype):
types = {
"int": [numpy.int8, numpy.int16, numpy.int32, numpy.int64],
"uint": [numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64],
"float": [numpy.float16, numpy.float32, numpy.float64],
}
for key, numpy_types in types.items():
for numpy_type in numpy_types:
if numpy.issubdtype(dtype, numpy_type):
return key
raise WriteError("Could not convert NumPy data type to DOLFIN data type.")
def _write_cell_data(filename, dim, cell_data):
dolfin = ET.Element("dolfin", nsmap={"dolfin": "https://fenicsproject.org/"})
mesh_function = ET.SubElement(
dolfin,
"mesh_function",
type=_numpy_type_to_dolfin_type(cell_data.dtype),
dim=str(dim),
size=str(len(cell_data)),
)
for k, value in enumerate(cell_data):
ET.SubElement(mesh_function, "entity", index=str(k), value=repr(value))
tree = ET.ElementTree(dolfin)
tree.write(filename)
def write(filename, mesh):
logging.warning("DOLFIN XML is a legacy format. Consider using XDMF instead.")
if any("tetra" == c.type for c in mesh.cells):
cell_type = "tetra"
elif any("triangle" == c.type for c in mesh.cells):
cell_type = "triangle"
else:
raise WriteError(
"DOLFIN XML only supports triangles and tetrahedra. "
"Consider using XDMF instead."
)
_write_mesh(filename, mesh.points, cell_type, mesh.cells)
for name, lst in mesh.cell_data.items():
for data in lst:
fname = os.path.splitext(filename)[0]
cell_data_filename = f"{fname}_{name}.xml"
dim = 2 if mesh.points.shape[1] == 2 or all(mesh.points[:, 2] == 0) else 3
_write_cell_data(cell_data_filename, dim, numpy.array(data))
register("dolfin-xml", [".xml"], read, {"dolfin-xml": write})
|
py | 1a38d8dfc6ad560ffaef189765c43f37f5bc4623 | import math, sys
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except BaseException as e:
print('''ERROR: PyOpenGL not installed properly.''')
print(e)
sys.exit()
from . import formula
HALF_PI = math.pi / 2.0
N_HALF_PIS = [None] * 4
for i in range(4):
n_half_pi = i * HALF_PI
N_HALF_PIS[i] = n_half_pi
N_HALF_PIS = tuple(N_HALF_PIS)
WHITE=(0xff, 0xff, 0xff)
BLACK=(0x00, 0x00, 0x00)
RED=(0xff, 0, 0)
GREEN=(0, 0xff, 0)
BLUE=(0, 0, 0xff)
CYAN=(0x00, 0xff, 0xff)
MAGENTA=(0xff, 0x00, 0xff)
YELLOW=(0xff, 0xff, 0x00)
GLAY=(0x80, 0x80, 0x80)
def put_on_square(r, x, y, leg, color=(0xff, 0xff, 0xff)):
'''r controls the rotation; x and y control the orbit.'''
glBegin(GL_QUADS)
put_on_squares(r, x, y, leg, color)
glEnd()
def put_on_squares(r, x, y, leg, color=(0xff, 0xff, 0xff)):
'''r controls the rotation; x and y control the orbit.'''
glColor3ub(*color)
for i in range(4):
wx = x + math.cos(r + N_HALF_PIS[i]) * leg
wy = y + math.sin(r + N_HALF_PIS[i]) * leg
glVertex2f(wx, wy)
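# For illustration (values are arbitrary): put_on_square(0.0, 0.0, 0.0, 0.1, RED)
# draws a red square centered on the origin whose four corners sit 0.1 away from
# the center, i.e. `leg` is the circumradius of the square, not its side length.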
def _moving_squares(passed_seconds):
_tick_tack(passed_seconds)
_rotating_square_on_origin(passed_seconds)
_rotating_square_around_origin(passed_seconds)
_moving_square_around_origin(passed_seconds)
_moving_square_around_origin_2(passed_seconds)
_rotating_square_around_origin_3(passed_seconds)
_rotating_square_around_origin_4(passed_seconds)
_rotating_square_around_origin_5(passed_seconds)
_rotating_square_around_origin_6(passed_seconds)
_rotating_square_around_origin_7(passed_seconds)
_rotating_square_around_origin_8(passed_seconds)
_rotating_square_around_origin_9(passed_seconds)
_rotating_square_around_origin_10(passed_seconds)
_rotating_square_around_origin_11(passed_seconds)
_rotating_square_around_origin_12(passed_seconds)
_rotating_square_around_origin_13(passed_seconds)
_rotating_square_around_origin_14(passed_seconds)
_rotating_square_around_origin_15(passed_seconds)
_rotating_square_around_origin_16(passed_seconds)
def _tick_tack(passed_seconds):
'''A square shuttles back and forth over the range 0 <= x <= pi / 2.'''
moving = formula._fmove(passed_seconds)
glColor3ub(0xff, 0xff, 0xff)
leg = 0.02
# 0 <= moving <= pi / 4
turn = math.pi / 2
q, r = divmod(passed_seconds, turn)
if int(q % 2) == 0:
rad = r
else:
rad = turn - r
x = math.cos(rad) * 0.98
y = math.sin(rad) * 0.98
put_on_square(rad, x, y, leg)
def _moving_square_around_origin_2(passed_seconds):
'''A red square circles clockwise around the origin without changing its orientation.'''
ps = passed_seconds
put_on_square(ps, math.cos(ps) / 4, math.sin(ps) / 4, 0.1, RED)
def _moving_square_around_origin(passed_seconds):
'''A green square circles clockwise around the origin without changing its orientation.'''
ps = passed_seconds
put_on_square(ps, math.cos(-ps) / 2, math.sin(-ps) / 2, 0.1, GREEN)
def _rotating_square_around_origin(passed_seconds):
'''A blue square circles clockwise around the origin without changing its orientation.'''
ps = passed_seconds
put_on_square(0, math.cos(-ps), math.sin(-ps), 0.1, BLUE)
def _rotating_square_around_origin_3(passed_seconds):
ps = passed_seconds
put_on_square(-ps, math.cos(-ps) * 3 / 4, math.sin(-ps) * 3 / 4,
0.1, CYAN)
def _rotating_square_around_origin_4(passed_seconds):
ps = passed_seconds
put_on_square(-ps, math.cos(ps) * 3 / 8, math.sin(ps) * 3 / 8,
0.1, MAGENTA)
def _rotating_square_around_origin_5(passed_seconds):
'''Tried arbitrary arguments for cos()/sin() and got a horizontally elongated elliptical orbit.'''
ps = passed_seconds
put_on_square(-ps, math.cos(-ps) * 5 / 8, math.sin(ps) * 3 / 8,
0.1, YELLOW)
def _rotating_square_around_origin_6(passed_seconds):
'''Tried arbitrary cos()/sin() arguments and got the reverse of _5's orbit.'''
ps = passed_seconds
put_on_square(-ps, math.cos(ps) * 5 / 8, math.sin(-ps) * 3 / 8,
0.1, GLAY)
def _rotating_square_around_origin_7(passed_seconds):
'''Tweak the cos()/sin() arguments to get a vertically elongated elliptical orbit.'''
ps = passed_seconds
put_on_square(-ps, math.cos(ps) * 3 / 8, math.sin(-ps) * 8 / 8,
0.1, WHITE)
'''In _5, _6 and _7 I thought I had produced elliptical orbits by adjusting the
arguments of cos() and sin(), but that was not it. The orbits were elliptical
because the coefficients multiplying the variables that control the orbit size
were not equal. I only just noticed this.
'''
def _rotating_square_around_origin_8(passed_seconds):
'''So, let's change only the arguments of cos()/sin()
and see what happens.'''
ps = passed_seconds
put_on_square(-ps, math.cos(-ps) * 3 / 8, math.sin(ps) * 8 / 8,
0.1, RED)
'''The elliptical orbit became the reverse elliptical orbit. I had already tried this in _5 and _6...'''
def _rotating_square_around_origin_9(passed_seconds):
'''Relative to _8(), flip only the sign inside cos()
and see what happens.'''
ps = passed_seconds
put_on_square(ps, math.cos(ps) * 3 / 8, math.sin(ps) * 8 / 8,
0.1, BLUE)
'''Nothing changed in the orbit at all.
Naturally, since cos(x) = cos(-x).
So instead I made the rotation negative to spin it in reverse.'''
def _rotating_square_around_origin_10(passed_seconds):
'''Given the behavior of _8() and _9(), I suspect that flipping the sign inside sin()
for _7(), the white square on the elliptical orbit, will reverse its rotation, so let's try it.'''
ps = passed_seconds
put_on_square(-2 * ps, math.cos(ps) * 3 / 8, math.sin(ps) * 8 / 8,
0.1, GLAY)
'''Too bad: the correct answer was that it becomes the reverse *orbit*.
Naturally, since sin(x) = -sin(-x).
It spins at double speed so the effect is easy to see.'''
def _rotating_square_around_origin_11(passed_seconds):
'''Just playing around here.'''
ps = passed_seconds
put_on_square(-2 * ps, math.cos(2*ps) * 3 / 8, math.sin(ps) * 8 / 8,
0.1, YELLOW)
def _rotating_square_around_origin_12(passed_seconds):
'''Just playing around here.'''
ps = passed_seconds
put_on_square(-2 * ps, math.cos(2*ps) * 3 / 8, math.sin(-2*ps) * 8 / 8,
0.1, RED)
def _rotating_square_around_origin_13(passed_seconds):
'''Just playing around here.'''
'''This one is my favorite.'''
ps = passed_seconds
put_on_square(-2 * ps, math.cos(ps) * 3 / 8, math.sin(2*ps) * 8 / 8,
0.05, BLUE)
def _rotating_square_around_origin_14(passed_seconds):
'''Just playing around here.'''
ps = passed_seconds
put_on_square(0, math.cos(ps), 0, 0.05, GREEN)
def _rotating_square_around_origin_15(passed_seconds):
'''Just playing around here.'''
ps = passed_seconds
put_on_square(0, 0, math.sin(ps), 0.05, GREEN)
def _rotating_square_around_origin_16(passed_seconds):
'''Just playing around here.'''
'''I like this one too. Also tried making it blink.'''
ps = passed_seconds
if int(passed_seconds * 10) % 2 == 0:
color = BLACK
else:
color = MAGENTA
put_on_square(-2 * ps, math.cos(ps), math.sin(2*ps), 0.05, color)
# equal to
# if int(passed_seconds * 10) % 2 == 0:
# pass
# else:
# put_on_square(-2 * ps, math.cos(ps), math.sin(2*ps), 0.05, MAGENTA)
def _rotating_square_on_origin(passed_seconds):
'''A white square spins clockwise in place at the origin.'''
put_on_square(-passed_seconds, 0, 0, 0.1)
|
py | 1a38d9bce4da089385984ef47be6560b6d0ff6c3 | import argparse
import logging
import os
import sys
# prevent, as early as possible, other modules from defining the root logger using basicConfig
import automl.logger
import automl
from automl.utils import Namespace as ns, config_load, datetime_iso, str2bool
from automl import log
parser = argparse.ArgumentParser()
parser.add_argument('framework', type=str,
help="The framework to evaluate as defined by default in resources/frameworks.yaml.")
parser.add_argument('benchmark', type=str, nargs='?', default='test',
help="The benchmark type to run as defined by default in resources/benchmarks/{benchmark}.yaml "
"or the path to a benchmark description file. Defaults to `%(default)s`.")
parser.add_argument('-m', '--mode', choices=['local', 'docker', 'aws'], default='local',
help="The mode that specifies how/where the benchmark tasks will be running. Defaults to %(default)s.")
parser.add_argument('-t', '--task', metavar='task_id', nargs='*', default=None,
help="The specific task name (as defined in the benchmark file) to run. "
"If not provided, then all tasks from the benchmark will be run.")
parser.add_argument('-f', '--fold', metavar='fold_num', type=int, nargs='*', default=None,
help="If task is provided, the specific fold(s) to run. "
"If fold is not provided, then all folds from the task definition will be run.")
parser.add_argument('-i', '--indir', metavar='input_dir', default=None,
help="Folder where datasets are loaded by default. Defaults to `input_dir` as defined in resources/config.yaml")
parser.add_argument('-o', '--outdir', metavar='output_dir', default=None,
help="Folder where all the outputs should be written. Defaults to `output_dir` as defined in resources/config.yaml")
parser.add_argument('-u', '--userdir', metavar='user_dir', default=None,
help="Folder where all the customizations are stored. Defaults to `user_dir` as defined in resources/config.yaml")
parser.add_argument('-p', '--parallel', metavar='parallel_jobs', type=int, default=1,
help="The number of jobs (i.e. tasks or folds) that can run in parallel. Defaults to %(default)s. "
"Currently supported only in docker and aws mode.")
parser.add_argument('-s', '--setup', choices=['auto', 'skip', 'force', 'only'], default='auto',
help="Framework/platform setup mode. Defaults to %(default)s. "
"•auto: setup is executed only if strictly necessary. •skip: setup is skipped. •force: setup is always executed before the benchmark. •only: only setup is executed (no benchmark).")
parser.add_argument('-k', '--keep-scores', type=str2bool, metavar='true|false', nargs='?', const=True, default=True,
help="Set to true [default] to save/add scores in output directory.")
parser.add_argument('--profiling', nargs='?', const=True, default=False, help=argparse.SUPPRESS)
parser.add_argument('-X', '--extra', default=[], action='append', help=argparse.SUPPRESS)
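# Illustrative example (assuming this file is saved as `runbenchmark.py`, its usual
# name in the automlbenchmark project; the framework/benchmark names below are
# placeholders for entries defined in resources/frameworks.yaml and resources/benchmarks/):
#   python runbenchmark.py RandomForest validation -m docker -p 2 -X verbose=true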
# group = parser.add_mutually_exclusive_group()
# group.add_argument('--keep-scores', dest='keep_scores', action='store_true',
# help="Set to true [default] to save/add scores in output directory")
# group.add_argument('--no-keep-scores', dest='keep_scores', action='store_false')
# parser.set_defaults(keep_scores=True)
# removing this command line argument for now: by default, we use the user's default region as defined in ~/aws/config
# on top of this, users can now override the aws.region setting in their custom ~/.config/automlbenchmark/config.yaml settings.
# parser.add_argument('-r', '--region', metavar='aws_region', default=None,
# help="The region on which to run the benchmark when using AWS.")
args = parser.parse_args()
script_name = os.path.splitext(os.path.basename(__file__))[0]
log_dir = os.path.join(args.outdir if args.outdir else '.', 'logs')
os.makedirs(log_dir, exist_ok=True)
now_str = datetime_iso(date_sep='', time_sep='')
# now_str = datetime_iso(time=False, no_sep=True)
if args.profiling:
logging.TRACE = logging.INFO
automl.logger.setup(log_file=os.path.join(log_dir, '{script}_{now}.log'.format(script=script_name, now=now_str)),
root_file=os.path.join(log_dir, '{script}_{now}_full.log'.format(script=script_name, now=now_str)),
root_level='DEBUG', console_level='INFO', print_to_log=True)
log.info("Running `%s` on `%s` benchmarks in `%s` mode.", args.framework, args.benchmark, args.mode)
log.debug("Script args: %s.", args)
extras = {t[0]: t[1] if len(t) > 1 else True for t in [x.split('=', 1) for x in args.extra]}
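# e.g. `-X aws.region=us-east-1 -X verbose` (illustrative keys) would yield
#      extras == {'aws.region': 'us-east-1', 'verbose': True}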
config = config_load("resources/config.yaml")
# allowing config override from user_dir: useful to define custom benchmarks and frameworks for example.
config_user = config_load(os.path.join(args.userdir if args.userdir is not None else config.user_dir, "config.yaml"))
# config listing properties set by command line
config_args = ns.parse(
{'results.save': args.keep_scores},
input_dir=args.indir,
output_dir=args.outdir,
user_dir=args.userdir,
run_mode=args.mode,
script=os.path.basename(__file__),
) + ns.parse(extras)
config_args = ns({k: v for k, v in config_args if v is not None})
log.debug("Config args: %s.", config_args)
# merging all configuration files
automl.resources.from_configs(config, config_user, config_args)
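# The argument order above suggests later sources take precedence:
# built-in resources/config.yaml < user config.yaml < command-line flags / -X extras
# (assumption based on how the configs are passed to from_configs).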
try:
if args.mode == 'local':
bench = automl.Benchmark(args.framework, args.benchmark, parallel_jobs=args.parallel)
elif args.mode == 'docker':
bench = automl.DockerBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel)
elif args.mode == 'aws':
bench = automl.AWSBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel)
# bench = automl.AWSBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel, region=args.region)
# elif args.mode == "aws-remote":
# bench = automl.AWSRemoteBenchmark(args.framework, args.benchmark, parallel_jobs=args.parallel, region=args.region)
else:
raise ValueError("`mode` must be one of 'aws', 'docker' or 'local'.")
if args.setup == 'only':
log.warning("Setting up %s environment only for %s, no benchmark will be run.", args.mode, args.framework)
if not args.keep_scores and args.mode != 'local':
log.warning("`keep_scores` parameter is currently ignored in %s mode, scores are always saved in this mode.", args.mode)
bench.setup(automl.Benchmark.SetupMode[args.setup])
if args.setup != 'only':
res = bench.run(args.task, args.fold)
except ValueError as e:
log.error('\nERROR:\n%s', e)
if extras.get('verbose') is True:
log.exception(e)
sys.exit(1)
|
py | 1a38dbe17369e5e52b64b41bf644cbe887d21044 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HeaderOperations(object):
"""HeaderOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def param_existing_key(
self, user_agent, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header value "User-Agent": "overwrite".
:param user_agent: Send a post request with header value "User-Agent":
"overwrite"
:type user_agent: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/existingkey'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['User-Agent'] = self._serialize.header("user_agent", user_agent, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_existing_key(
self, custom_headers=None, raw=False, **operation_config):
"""Get a response with header value "User-Agent": "overwrite".
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/existingkey'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'User-Agent': 'str',
})
return client_raw_response
def param_protected_key(
self, content_type, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header value "Content-Type": "text/html".
:param content_type: Send a post request with header value
"Content-Type": "text/html"
:type content_type: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/protectedkey'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_protected_key(
self, custom_headers=None, raw=False, **operation_config):
"""Get a response with header value "Content-Type": "text/html".
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/protectedkey'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Content-Type': 'str',
})
return client_raw_response
def param_integer(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "positive", "value":
1 or "scenario": "negative", "value": -2 .
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param value: Send a post request with header values 1 or -2
:type value: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/integer'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'int')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_integer(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header value "value": 1 or -2.
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/integer'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'int',
})
return client_raw_response
def param_long(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "positive", "value":
105 or "scenario": "negative", "value": -2 .
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param value: Send a post request with header values 105 or -2
:type value: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/long'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'long')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_long(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header value "value": 105 or -2.
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/long'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'long',
})
return client_raw_response
def param_float(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "positive", "value":
0.07 or "scenario": "negative", "value": -3.0.
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param value: Send a post request with header values 0.07 or -3.0
:type value: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/float'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'float')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_float(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header value "value": 0.07 or -3.0.
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/float'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'float',
})
return client_raw_response
def param_double(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "positive", "value":
7e120 or "scenario": "negative", "value": -3.0.
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param value: Send a post request with header values 7e120 or -3.0
:type value: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/double'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'float')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_double(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header value "value": 7e120 or -3.0.
:param scenario: Send a post request with header values "scenario":
"positive" or "negative"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/double'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'float',
})
return client_raw_response
def param_bool(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "true", "value":
true or "scenario": "false", "value": false.
:param scenario: Send a post request with header values "scenario":
"true" or "false"
:type scenario: str
:param value: Send a post request with header values true or false
:type value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/bool'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'bool')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_bool(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header value "value": true or false.
:param scenario: Send a post request with header values "scenario":
"true" or "false"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/bool'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'bool',
})
return client_raw_response
def param_string(
self, scenario, value=None, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "valid", "value":
"The quick brown fox jumps over the lazy dog" or "scenario": "null",
"value": null or "scenario": "empty", "value": "".
:param scenario: Send a post request with header values "scenario":
"valid" or "null" or "empty"
:type scenario: str
:param value: Send a post request with header values "The quick brown
fox jumps over the lazy dog" or null or ""
:type value: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/string'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
if value is not None:
header_parameters['value'] = self._serialize.header("value", value, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_string(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header values "The quick brown fox jumps over the
lazy dog" or null or "".
:param scenario: Send a post request with header values "scenario":
"valid" or "null" or "empty"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/string'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'str',
})
return client_raw_response
def param_date(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "valid", "value":
"2010-01-01" or "scenario": "min", "value": "0001-01-01".
:param scenario: Send a post request with header values "scenario":
"valid" or "min"
:type scenario: str
:param value: Send a post request with header values "2010-01-01" or
"0001-01-01"
:type value: date
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/date'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'date')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_date(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header values "2010-01-01" or "0001-01-01".
:param scenario: Send a post request with header values "scenario":
"valid" or "min"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/date'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'date',
})
return client_raw_response
def param_datetime(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "valid", "value":
"2010-01-01T12:34:56Z" or "scenario": "min", "value":
"0001-01-01T00:00:00Z".
:param scenario: Send a post request with header values "scenario":
"valid" or "min"
:type scenario: str
:param value: Send a post request with header values
"2010-01-01T12:34:56Z" or "0001-01-01T00:00:00Z"
:type value: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/datetime'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'iso-8601')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_datetime(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header values "2010-01-01T12:34:56Z" or
"0001-01-01T00:00:00Z".
:param scenario: Send a post request with header values "scenario":
"valid" or "min"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/datetime'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'iso-8601',
})
return client_raw_response
def param_datetime_rfc1123(
self, scenario, value=None, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "valid", "value":
"Wed, 01 Jan 2010 12:34:56 GMT" or "scenario": "min", "value": "Mon, 01
Jan 0001 00:00:00 GMT".
:param scenario: Send a post request with header values "scenario":
"valid" or "min"
:type scenario: str
:param value: Send a post request with header values "Wed, 01 Jan 2010
12:34:56 GMT" or "Mon, 01 Jan 0001 00:00:00 GMT"
:type value: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/datetimerfc1123'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
if value is not None:
header_parameters['value'] = self._serialize.header("value", value, 'rfc-1123')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_datetime_rfc1123(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header values "Wed, 01 Jan 2010 12:34:56 GMT" or
"Mon, 01 Jan 0001 00:00:00 GMT".
:param scenario: Send a post request with header values "scenario":
"valid" or "min"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/datetimerfc1123'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'rfc-1123',
})
return client_raw_response
def param_duration(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "valid", "value":
"P123DT22H14M12.011S".
:param scenario: Send a post request with header values "scenario":
"valid"
:type scenario: str
:param value: Send a post request with header values
"P123DT22H14M12.011S"
:type value: timedelta
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/duration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'duration')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_duration(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header values "P123DT22H14M12.011S".
:param scenario: Send a post request with header values "scenario":
"valid"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/duration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'duration',
})
return client_raw_response
def param_byte(
self, scenario, value, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "valid", "value":
"啊齄丂狛狜隣郎隣兀﨩".
:param scenario: Send a post request with header values "scenario":
"valid"
:type scenario: str
:param value: Send a post request with header values "啊齄丂狛狜隣郎隣兀﨩"
:type value: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/byte'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
header_parameters['value'] = self._serialize.header("value", value, 'bytearray')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_byte(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header values "啊齄丂狛狜隣郎隣兀﨩".
:param scenario: Send a post request with header values "scenario":
"valid"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/byte'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': 'bytearray',
})
return client_raw_response
def param_enum(
self, scenario, value=None, custom_headers=None, raw=False, **operation_config):
"""Send a post request with header values "scenario": "valid", "value":
"GREY" or "scenario": "null", "value": null.
:param scenario: Send a post request with header values "scenario":
"valid" or "null" or "empty"
:type scenario: str
:param value: Send a post request with header values 'GREY'. Possible
values include: 'White', 'black', 'GREY'
:type value: str or :class:`GreyscaleColors
<fixtures.acceptancetestsheader.models.GreyscaleColors>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/param/prim/enum'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
if value is not None:
header_parameters['value'] = self._serialize.header("value", value, 'GreyscaleColors')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def response_enum(
self, scenario, custom_headers=None, raw=False, **operation_config):
"""Get a response with header values "GREY" or null.
:param scenario: Send a post request with header values "scenario":
"valid" or "null" or "empty"
:type scenario: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/response/prim/enum'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['scenario'] = self._serialize.header("scenario", scenario, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'value': models.GreyscaleColors,
})
return client_raw_response
def custom_request_id(
self, custom_headers=None, raw=False, **operation_config):
"""Send x-ms-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 in
the header of the request.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsheader.models.ErrorException>`
"""
# Construct URL
url = '/header/custom/x-ms-client-request-id/9C4D50EE-2D56-4CD3-8152-34347DC9F2B0'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
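# --- Illustrative usage sketch (not part of the generated code) ---
# Every operation above follows the same pattern: serialize the header parameters,
# POST to a fixed test URL, and (with raw=True) return a ClientRawResponse whose
# typed response headers are registered via add_headers(). Assuming `client` is the
# AutoRest-generated service client that exposes this operation group as
# `client.header` (the client class itself is defined elsewhere):
#
#     client.header.param_integer(scenario="positive", value=1)
#     raw = client.header.response_integer(scenario="positive", raw=True)
#     value_header = raw.headers.get("value")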
|
py | 1a38dd5d3e23ae62580dc6ecc490f3f792de0d08 | import numpy
from six import moves
import chainer
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
from chainer import variable
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cudnn.cudnn
_fwd_pref = libcudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
_bwd_filter_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT
_bwd_data_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
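# e.g. _pair(3) -> (3, 3), while _pair((2, 4)) is returned unchanged, so stride,
# pad and dilate may each be given as a single int or as a (height, width) pair.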
class DilatedConvolution2DFunction(function.Function):
def __init__(self, stride=1, pad=0, dilate=1, cover_all=False,
requires_x_grad=True):
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.dy, self.dx = _pair(dilate)
self.cover_all = cover_all
self.requires_x_grad = requires_x_grad
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 4,
w_type.ndim == 4,
x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_cpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, numpy.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
kh, kw = W.shape[2:]
self.col = conv.im2col_cpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = numpy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
if b is not None:
y += b
return numpy.rollaxis(y, 3, 1),
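# Note: forward_cpu lowers the dilated convolution to a single tensordot.
# im2col materializes every dilated receptive field into a column array of shape
# (n, c, kh, kw, out_h, out_w); contracting the (c, kh, kw) axes with W gives the
# output, which rollaxis then reorders to (n, out_c, out_h, out_w).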
def forward_gpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, cuda.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
out_c, _, kh, kw = W.shape
n, c, h, w = x.shape
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
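# dkh/dkw are the effective ("dilated") kernel extents: a k-tap filter with
# dilation d spans k + (k - 1) * (d - 1) input pixels along that axis.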
out_h = conv.get_conv_outsize(h, kh, self.sy, self.ph,
cover_all=self.cover_all, d=self.dy)
out_w = conv.get_conv_outsize(w, kw, self.sx, self.pw,
cover_all=self.cover_all, d=self.dx)
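# With cover_all=False this reduces to the usual output-size formula:
#   out = (size + 2 * pad - dilated_kernel) // stride + 1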
y = cuda.cupy.zeros((n, out_c, out_h, out_w), dtype=x.dtype)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros((n, c, h + 2 * self.ph, w + 2 * self.pw),
dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
handle = cudnn.get_handle()
xji_desc = cudnn.create_tensor_descriptor(xji)
y_desc = cudnn.create_tensor_descriptor(y)
self.filter_desc = cudnn.create_filter_descriptor(Wji)
self.conv_desc = cudnn.create_convolution_descriptor(
(0, 0), (self.sy, self.sx), xji.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo = libcudnn.getConvolutionForwardAlgorithm(
handle, xji_desc.value, self.filter_desc.value,
self.conv_desc.value, y_desc.value, _fwd_pref,
workspace_size)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
libcudnn.convolutionForward(
handle, one.data, xji_desc.value, xji.data.ptr,
self.filter_desc.value, Wji.data.ptr,
self.conv_desc.value, algo, workspace.data.ptr,
workspace_size, one.data, y_desc.value, y.data.ptr)
if b is not None:
b = cuda.cupy.ascontiguousarray(b)
self.bias_desc = cudnn.create_tensor_descriptor(
b[None, :, None, None])
cudnn.add_tensor(
handle, one.data, self.bias_desc.value, b.data.ptr,
one.data, y_desc.value, y.data.ptr)
else:
# Implementation using im2col
self.col = conv.im2col_gpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = cuda.cupy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype,
copy=False)
# TODO(beam2d): Support unshared bias
if b is not None:
y += b
y = cuda.cupy.rollaxis(y, 3, 1)
return y,
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = numpy.tensordot(W, gy, (0, 1)).astype(x.dtype, copy=False)
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def backward_gpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
_, out_c, out_h, out_w = gy.shape
n, c, h, w = x.shape
kh, kw = W.shape[2:]
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
gW = cuda.cupy.empty_like(W)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros(
(n, c, h + 2 * self.ph, w + 2 * self.pw), dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
out_sh = out_h + (out_h - 1) * (self.sy - 1)
out_sw = out_w + (out_w - 1) * (self.sx - 1)
gy_ph = (h + dkh - out_sh - 1) // 2
gy_pw = (w + dkw - out_sw - 1) // 2
pad_gy = cuda.cupy.zeros(
(n, out_c, h + dkh - 1, w + dkw - 1), dtype=x.dtype)
pad_gy[:, :,
gy_ph:gy_ph + out_sh:self.sy,
gy_pw:gy_pw + out_sw:self.sx] = gy
gx = None
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
gyji = cuda.cupy.ascontiguousarray(
pad_gy[:, :,
j * self.dy:j * self.dy + h,
i * self.dx:i * self.dx + w])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, -1::-1, -1::-1][:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
x = cuda.cupy.ascontiguousarray(x)
gy = cuda.cupy.ascontiguousarray(gy)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
xji_desc = cudnn.create_tensor_descriptor(xji)
gy_desc = cudnn.create_tensor_descriptor(gy)
gyji_desc = cudnn.create_tensor_descriptor(gyji)
conv_desc_data = cudnn.create_convolution_descriptor(
(0, 0), (1, 1), xji.dtype)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
if self.requires_x_grad:
gx = cuda.cupy.zeros_like(x)
gWji = cuda.cupy.empty((out_c, c, 1, 1), dtype=W.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo_filter = (
libcudnn.getConvolutionBackwardFilterAlgorithm(
handle, xji_desc.value, gy_desc.value,
self.conv_desc.value,
self.filter_desc.value,
_bwd_filter_pref, workspace_size))
algo_data = (
libcudnn.getConvolutionBackwardDataAlgorithm(
handle, self.filter_desc.value,
gyji_desc.value, conv_desc_data.value,
x_desc.value, _bwd_data_pref,
workspace_size))
libcudnn.convolutionBackwardFilter_v3(
handle, one.data, xji_desc.value, xji.data.ptr,
gy_desc.value, gy.data.ptr, self.conv_desc.value,
algo_filter, workspace.data.ptr, workspace_size,
zero.data, self.filter_desc.value, gWji.data.ptr)
if self.requires_x_grad:
libcudnn.convolutionBackwardData_v3(
handle, one.data, self.filter_desc.value,
Wji.data.ptr, gyji_desc.value,
gyji.data.ptr, conv_desc_data.value,
algo_data, workspace.data.ptr, workspace_size,
one.data, x_desc.value, gx.data.ptr)
gW[:, :, j:j + 1, i:i + 1] = gWji
if b is not None:
gb = cuda.cupy.empty_like(b)
libcudnn.convolutionBackwardBias(
handle, one.data, gy_desc.value, gy.data.ptr,
zero.data, self.bias_desc.value, gb.data.ptr)
else:
gW = cuda.cupy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype,
copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = cuda.cupy.tensordot(W, gy, (0, 1)).astype(x.dtype,
copy=False)
gcol = cuda.cupy.rollaxis(gcol, 3)
gx = conv.col2im_gpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is not None:
gb = gy.sum(axis=(0, 2, 3))
if b is None:
return gx, gW
else:
return gx, gW, gb
def dilated_convolution_2d(x, W, b=None, stride=1, pad=0, dilate=1,
cover_all=False):
"""Two-dimensional dilated convolution function.
This is an implementation of two-dimensional dilated convolution
in ConvNets.
It takes three variables: the input image ``x``, the filter weight ``W``,
and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
    - :math:`c_I` and :math:`c_O` are the number of input and output channels,
respectively.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
W (~chainer.Variable): Weight variable of shape
:math:`(c_O, c_I, k_H, k_W)`.
b (~chainer.Variable): Bias variable of length :math:`c_O` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
Returns:
~chainer.Variable: Output variable.
    The two-dimensional dilated convolution is defined as follows: the
    ``DilatedConvolution2D`` function computes correlations
between filters and patches of size :math:`(k_H, k_W)` in ``x``.
Patches here are extracted at intervals of the dilation factor.
Note that correlation here is equivalent to the inner product between
expanded vectors.
Patches are extracted at intervals of the dilation factor and at positions
shifted by multiples of ``stride`` from the first position ``-pad`` for
each spatial axis. The right-most (or bottom-most) patches do not run over
the padded spatial size.
Let :math:`(s_Y, s_X)` be the stride of filter application,
:math:`(p_H, p_W)` the spatial padding size, and :math:`(d_Y, d_X)`
the dilation factor of filter application. Then, the output size
:math:`(h_O, w_O)` is determined by the following equations:
.. math::
h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
.. seealso:: :class:`DilatedConvolution2D`
"""
requires_x_grad = isinstance(x, variable.Variable) and x.requires_grad
func = DilatedConvolution2DFunction(stride, pad, dilate, cover_all,
requires_x_grad)
if b is None:
return func(x, W)
else:
return func(x, W, b)
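# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercising the output
# size formula from the docstring, h_O = (h + 2*p_H - k_H - (k_H - 1)*(d_Y - 1)) / s_Y + 1,
# on random NumPy inputs. All shapes and values below are made up for illustration.
if __name__ == '__main__':
    import numpy as np
    x = np.random.rand(1, 3, 32, 32).astype(np.float32)
    W = np.random.rand(8, 3, 3, 3).astype(np.float32)
    y = dilated_convolution_2d(x, W, stride=1, pad=2, dilate=2)
    # (32 + 2*2 - 3 - (3 - 1)*(2 - 1)) / 1 + 1 = 32, so the spatial size is preserved.
    assert y.shape == (1, 8, 32, 32)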
|
py | 1a38dd777d3e6405af92d30bd92c65eb59b0695c | from xml.dom import minidom as xd
import re
from AbstractRule import AbstractRule
class FileNamingRule(AbstractRule):
def __init__(self):
AbstractRule.__init__(self)
self.DictionaryList = []
self.DictionaryBaseClassList = []
def execute(self):
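        # Checks class naming against two dictionaries: ParameterList[0] holds
        # the allowed class-name prefixes and ParameterList[1] holds class names
        # to skip. Classes without a base class must start with one of the
        # prefixes; classes with a base class must match a prefix and also start
        # with their base class name, unless listed in the skip file. Offending
        # classes are appended to MarkedList.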
f = open("./Rules/FileNamingRules/" + self.ParameterList[0], 'r')
lines = f.readlines()
for line in lines:
self.DictionaryList.append(line.replace("\n","").replace("\r",""))
fBase = open("./Rules/FileNamingRules/" + self.ParameterList[1], 'r')
linesBase = fBase.readlines()
for lineBase in linesBase:
self.DictionaryBaseClassList.append(lineBase.replace("\n","").replace("\r",""))
self.dom = xd.parse(self.FullPathInputFile)
className = self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('compoundname')[0].firstChild.nodeValue
if(self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('basecompoundref') == None \
or len(self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('basecompoundref')) == 0):
for prefix in self.DictionaryList:
x = re.compile("^"+ prefix +"[A-Z].*")
cname = className
if("::" in className) :
cname = className[className.index("::")+2:]
if(re.match(x, str(cname))):
#print "OK " , cname
return self.MarkedList
#print "***NO without Base *** " , cname
self.MarkedList.append("<item><class>" + str(className) + "</class></item>")
return self.MarkedList
result = False
goodPrefix = ""
for prefix in self.DictionaryList:
x = re.compile("^"+ prefix +"[A-Z].*")
if(re.match(x, str(className))):
result = True
                goodPrefix = prefix
                break
if(result == False):
#print "***NO 1 with base*** " , className
self.MarkedList.append("<item><class>" + str(className) + "</class></item>")
return self.MarkedList
for skipName in self.DictionaryBaseClassList:
if(skipName == str(className)):
return self.MarkedList
baseClassName = self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('basecompoundref')[0].firstChild.nodeValue
## inheritance rule
x2 = re.compile("^"+ baseClassName +".*") #baseClassName or goodPrefix
if(re.match(x2, str(className))):
##print "OK " , className , baseClassName
return self.MarkedList
##print "***NO 2*** " , className , baseClassName
self.MarkedList.append("<item><class>" + str(className) + "</class></item>")
return self.MarkedList
|
py | 1a38dde80bbda68f0366ad320cd59d7fbc944a0b | '''
Jittor port of the PyTorch torchsummary implementation:
https://github.com/sksq96/pytorch-summary/blob/master/torchsummary/torchsummary.py
'''
import numpy as np
import jittor as jt
from jittor import nn
from jittor import init
from collections import OrderedDict
device_list = ['cpu', 'cuda']
def summary(model, input_size, batch_size=-1, device='cpu', dtypes=None):
assert(device in device_list)
result, params_info = summary_string(
model, input_size, batch_size, device, dtypes)
print(result)
return params_info
def summary_string(model, input_size, batch_size=-1, device='cpu', dtypes=None):
assert(device in device_list)
if device == 'cuda':
jt.flags.use_cuda = 1
else:
jt.flags.use_cuda = 0
if dtypes == None:
dtypes = [jt.float]*len(input_size)
summary_str = ''
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
summary[m_key]["input_shape"] = list(input[0].size())
summary[m_key]["input_shape"][0] = batch_size
if isinstance(output, (list, tuple)):
summary[m_key]["output_shape"] = [
[-1] + list(o.size())[1:] for o in output
]
else:
summary[m_key]["output_shape"] = list(output.size())
summary[m_key]["output_shape"][0] = batch_size
params = 0
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += np.prod(np.array(list(module.weight.size()), dtype = np.int64))
summary[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += np.prod(np.array(list(module.bias.size()), dtype = np.int64))
summary[m_key]["nb_params"] = params
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
):
hooks.append(module.register_forward_hook(hook))
# multiple inputs to the network
if isinstance(input_size, tuple):
input_size = [input_size]
# batch_size of 2 for batchnorm
x = [jt.rand(2, *in_size).float()
for in_size in input_size]
# create properties
summary = OrderedDict()
hooks = []
# register hook
model.apply(register_hook)
# make a forward pass
# print(x.shape)
model(*x)
# remove these hooks
for h in hooks:
if h:
h.remove()
summary_str += "----------------------------------------------------------------" + "\n"
line_new = "{:>20} {:>25} {:>15}".format(
"Layer (type)", "Output Shape", "Param #")
summary_str += line_new + "\n"
summary_str += "================================================================" + "\n"
total_params = 0
total_output = 0
trainable_params = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output_shape"]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
total_output += np.prod(summary[layer]["output_shape"])
if "trainable" in summary[layer]:
if summary[layer]["trainable"] == True:
trainable_params += summary[layer]["nb_params"]
summary_str += line_new + "\n"
# assume 4 bytes/number (float on cuda).
total_input_size = abs(np.prod(sum(input_size, ()))
* batch_size * 4. / (1024 ** 2.))
total_output_size = abs(2. * total_output * 4. /
(1024 ** 2.)) # x2 for gradients
total_params_size = abs(total_params * 4. / (1024 ** 2.))
total_size = total_params_size + total_output_size + total_input_size
summary_str += "================================================================" + "\n"
summary_str += "Total params: {0:,}".format(total_params) + "\n"
summary_str += "Trainable params: {0:,}".format(trainable_params) + "\n"
summary_str += "Non-trainable params: {0:,}".format(total_params -
trainable_params) + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
summary_str += "Input size (MB): %0.2f" % total_input_size + "\n"
summary_str += "Forward/backward pass size (MB): %0.2f" % total_output_size + "\n"
summary_str += "Params size (MB): %0.2f" % total_params_size + "\n"
summary_str += "Estimated Total Size (MB): %0.2f" % total_size + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
# return summary
return summary_str, (total_params, trainable_params) |
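# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): calling summary() on a
# small made-up jittor model. The MLP below is only for illustration.
if __name__ == '__main__':
    mlp = nn.Sequential(
        nn.Linear(128, 64),
        nn.Linear(64, 10),
    )
    # Prints the per-layer table and returns (total_params, trainable_params).
    total_params, trainable_params = summary(mlp, (128,), batch_size=2, device='cpu')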
py | 1a38de936aeb68b171c485ffa1dd1ef7c6aaa166 | # Copyright 2022 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import models
from torchvision import transforms
from torchvision.models.feature_extraction import create_feature_extractor
__all__ = [
"ResidualConvBlock",
"Discriminator", "Generator",
"ContentLoss"
]
class ResidualConvBlock(nn.Module):
"""Implements residual conv function.
Args:
channels (int): Number of channels in the input image.
"""
def __init__(self, channels: int) -> None:
super(ResidualConvBlock, self).__init__()
self.rcb = nn.Sequential(
nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(channels),
nn.PReLU(),
nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(channels),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.rcb(x)
out = torch.add(out, identity)
return out
class UpsampleBlock(nn.Module):
def __init__(self, channels: int) -> None:
super(UpsampleBlock, self).__init__()
self.upsample_block = nn.Sequential(
nn.Conv2d(channels, channels * 4, (3, 3), (1, 1), (1, 1)),
nn.PixelShuffle(2),
nn.PReLU(),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.upsample_block(x)
return out
class Discriminator(nn.Module):
def __init__(self) -> None:
super(Discriminator, self).__init__()
self.features = nn.Sequential(
# input size. (3) x 96 x 96
nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1), bias=True),
nn.LeakyReLU(0.2, True),
# state size. (64) x 48 x 48
nn.Conv2d(64, 64, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, True),
nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, True),
# state size. (128) x 24 x 24
nn.Conv2d(128, 128, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, True),
nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, True),
# state size. (256) x 12 x 12
nn.Conv2d(256, 256, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, True),
nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, True),
# state size. (512) x 6 x 6
nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, True),
)
self.classifier = nn.Sequential(
nn.Linear(512 * 6 * 6, 1024),
nn.LeakyReLU(0.2, True),
nn.Linear(1024, 1),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.features(x)
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
class Generator(nn.Module):
def __init__(self) -> None:
super(Generator, self).__init__()
# First conv layer.
self.conv_block1 = nn.Sequential(
nn.Conv2d(3, 64, (9, 9), (1, 1), (4, 4)),
nn.PReLU(),
)
# Features trunk blocks.
trunk = []
for _ in range(16):
trunk.append(ResidualConvBlock(64))
self.trunk = nn.Sequential(*trunk)
# Second conv layer.
self.conv_block2 = nn.Sequential(
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1), bias=False),
nn.BatchNorm2d(64),
)
# Upscale block
upsampling = []
for _ in range(2):
upsampling.append(UpsampleBlock(64))
self.upsampling = nn.Sequential(*upsampling)
# Output layer.
self.conv_block3 = nn.Conv2d(64, 3, (9, 9), (1, 1), (4, 4))
# Initialize neural network weights
self._initialize_weights()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._forward_impl(x)
# Support torch.script function
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
out1 = self.conv_block1(x)
out = self.trunk(out1)
out2 = self.conv_block2(out)
out = torch.add(out1, out2)
out = self.upsampling(out)
out = self.conv_block3(out)
out = torch.clamp_(out, 0.0, 1.0)
return out
def _initialize_weights(self) -> None:
for module in self.modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
class ContentLoss(nn.Module):
"""Constructs a content loss function based on the VGG19 network.
    Using feature maps from the deeper VGG layers makes the loss focus more on the texture content of the image.
Paper reference list:
-`Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network <https://arxiv.org/pdf/1609.04802.pdf>` paper.
-`ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks <https://arxiv.org/pdf/1809.00219.pdf>` paper.
-`Perceptual Extreme Super Resolution Network with Receptive Field Block <https://arxiv.org/pdf/2005.12597.pdf>` paper.
"""
def __init__(self, feature_model_extractor_node: str,
feature_model_normalize_mean: list,
feature_model_normalize_std: list) -> None:
super(ContentLoss, self).__init__()
# Get the name of the specified feature extraction node
self.feature_model_extractor_node = feature_model_extractor_node
# Load the VGG19 model trained on the ImageNet dataset.
model = models.vgg19(True)
# Extract the thirty-sixth layer output in the VGG19 model as the content loss.
self.feature_extractor = create_feature_extractor(model, [feature_model_extractor_node])
# set to validation mode
self.feature_extractor.eval()
# The preprocessing method of the input data. This is the VGG model preprocessing method of the ImageNet dataset.
self.normalize = transforms.Normalize(feature_model_normalize_mean, feature_model_normalize_std)
# Freeze model parameters.
for model_parameters in self.feature_extractor.parameters():
model_parameters.requires_grad = False
def forward(self, sr_tensor: torch.Tensor, hr_tensor: torch.Tensor) -> torch.Tensor:
# Standardized operations
sr_tensor = self.normalize(sr_tensor)
hr_tensor = self.normalize(hr_tensor)
sr_feature = self.feature_extractor(sr_tensor)[self.feature_model_extractor_node]
hr_feature = self.feature_extractor(hr_tensor)[self.feature_model_extractor_node]
# Find the feature map difference between the two images
content_loss = F.mse_loss(sr_feature, hr_feature)
return content_loss
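# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): running the generator
# and discriminator on random tensors. The 4x upscale and the 96x96
# discriminator input follow the layer comments above; all values are made up.
if __name__ == "__main__":
    generator = Generator()
    discriminator = Discriminator()
    lr_image = torch.rand(1, 3, 24, 24)  # low-resolution input
    sr_image = generator(lr_image)       # (1, 3, 96, 96) after two PixelShuffle(2) blocks
    validity = discriminator(sr_image)   # (1, 1) real/fake logit
    print(sr_image.shape, validity.shape)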
|
py | 1a38df500e1c7c13efecd62ad38805688c1de528 | from datasets.acdc import GenACDC
from datasets.mnm import GenMNM |
py | 1a38df8139d9489d66925f14359dce21890f901e | """Utilities to get elements of generated spec"""
from apispec.utils import build_reference
def get_schemas(spec):
if spec.openapi_version.major < 3:
return spec.to_dict()["definitions"]
return spec.to_dict()["components"]["schemas"]
def get_parameters(spec):
if spec.openapi_version.major < 3:
return spec.to_dict()["parameters"]
return spec.to_dict()["components"]["parameters"]
def get_responses(spec):
if spec.openapi_version.major < 3:
return spec.to_dict()["responses"]
return spec.to_dict()["components"]["responses"]
def get_security_schemes(spec):
if spec.openapi_version.major < 3:
return spec.to_dict()["securityDefinitions"]
return spec.to_dict()["components"]["securitySchemes"]
def get_paths(spec):
return spec.to_dict()["paths"]
def build_ref(spec, component_type, obj):
return build_reference(component_type, spec.openapi_version.major, obj)
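# Hedged usage note (not part of the original helpers): for a spec whose
# openapi_version is 2.x, build_ref(spec, "schema", "Pet") is expected to return
# {"$ref": "#/definitions/Pet"}; for 3.x it becomes
# {"$ref": "#/components/schemas/Pet"}.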
|
py | 1a38e0b3cdd1ed4c39d704e4d95f4f19cd4da050 | """Constants for the xbox integration."""
DOMAIN = "xbox"
OAUTH2_AUTHORIZE = "https://login.live.com/oauth20_authorize.srf"
OAUTH2_TOKEN = "https://login.live.com/oauth20_token.srf"
EVENT_NEW_FAVORITE = "xbox/new_favorite"
|
py | 1a38e15526951e725fe3f417003489a5b8414bfd | #!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts video encoding result data from text files to visualization
data source."""
__author__ = "[email protected] (James Zern),"
__author__ += "[email protected] (Jim Bankoski)"
__author__ += "[email protected] (Harald Alvestrand)"
import encoder
import gviz_api
import math
import mpeg_settings
import numpy
import optimizer
import re
import string
import pick_codec
def bdsnr(metric_set1, metric_set2):
"""
BJONTEGAARD Bjontegaard metric calculation
  Bjontegaard's metric allows computing the average gain in PSNR between two
rate-distortion curves [1].
rate1,psnr1 - RD points for curve 1
rate2,psnr2 - RD points for curve 2
returns the calculated Bjontegaard metric 'dsnr'
code adapted from code written by : (c) 2010 Giuseppe Valenzise
http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
"""
# pylint: disable=too-many-locals
# numpy seems to do tricks with its exports.
# pylint: disable=no-member
# map() is recommended against.
# pylint: disable=bad-builtin
rate1 = [x[0] for x in metric_set1]
psnr1 = [x[1] for x in metric_set1]
rate2 = [x[0] for x in metric_set2]
psnr2 = [x[1] for x in metric_set2]
log_rate1 = map(math.log, rate1)
log_rate2 = map(math.log, rate2)
  # Best cubic poly fit for graph represented by log_rate_x, psnr_x.
poly1 = numpy.polyfit(log_rate1, psnr1, 3)
poly2 = numpy.polyfit(log_rate2, psnr2, 3)
# Integration interval.
min_int = max([min(log_rate1), min(log_rate2)])
max_int = min([max(log_rate1), max(log_rate2)])
# Integrate poly1, and poly2.
p_int1 = numpy.polyint(poly1)
p_int2 = numpy.polyint(poly2)
# Calculate the integrated value over the interval we care about.
int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
# Calculate the average improvement.
if max_int != min_int:
avg_diff = (int2 - int1) / (max_int - min_int)
else:
avg_diff = 0.0
return avg_diff
def bdrate(metric_set1, metric_set2):
"""
BJONTEGAARD Bjontegaard metric calculation
  Bjontegaard's metric allows computing the average % saving in bitrate
between two rate-distortion curves [1].
rate1,psnr1 - RD points for curve 1
rate2,psnr2 - RD points for curve 2
adapted from code from: (c) 2010 Giuseppe Valenzise
"""
# numpy plays games with its exported functions.
# pylint: disable=no-member
# pylint: disable=too-many-locals
# pylint: disable=bad-builtin
rate1 = [x[0] for x in metric_set1]
psnr1 = [x[1] for x in metric_set1]
rate2 = [x[0] for x in metric_set2]
psnr2 = [x[1] for x in metric_set2]
log_rate1 = map(math.log, rate1)
log_rate2 = map(math.log, rate2)
  # Best cubic poly fit for graph represented by log_rate_x, psnr_x.
poly1 = numpy.polyfit(psnr1, log_rate1, 3)
poly2 = numpy.polyfit(psnr2, log_rate2, 3)
# Integration interval.
min_int = max([min(psnr1), min(psnr2)])
max_int = min([max(psnr1), max(psnr2)])
# find integral
p_int1 = numpy.polyint(poly1)
p_int2 = numpy.polyint(poly2)
# Calculate the integrated value over the interval we care about.
int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
# Calculate the average improvement.
avg_exp_diff = (int2 - int1) / (max_int - min_int)
# In really bad formed data the exponent can grow too large.
# clamp it.
if avg_exp_diff > 200:
avg_exp_diff = 200
# Convert to a percentage.
avg_diff = (math.exp(avg_exp_diff) - 1) * 100
return avg_diff
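# Hedged usage note (not part of the original script): bdsnr() and bdrate()
# expect lists of (bitrate, psnr) tuples with at least four points per curve,
# since both fit a cubic polynomial. The numbers below are made up, e.g.:
#   curve_a = [(100, 30.0), (200, 33.0), (400, 36.0), (800, 39.0)]
#   curve_b = [(100, 31.0), (200, 34.0), (400, 37.0), (800, 40.0)]
#   bdsnr(curve_a, curve_b)   # average PSNR difference between the fitted curves
#   bdrate(curve_a, curve_b)  # average bitrate difference in percent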
def FillForm(string_for_substitution, dictionary_of_vars):
"""
  Substitutes every //%%name%%// placeholder in string_for_substitution with
  the corresponding value from dictionary_of_vars.
"""
return_string = string_for_substitution
for i in re.findall("//%%(.*)%%//", string_for_substitution):
return_string = re.sub("//%%" + i + "%%//", dictionary_of_vars[i],
return_string)
return return_string
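# Hedged usage note (not part of the original script): FillForm replaces
# //%%name%%// placeholders with values from the dictionary, e.g.
#   FillForm("rate: //%%rate%%//", {'rate': '500'})  ->  "rate: 500"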
def HasMetrics(line):
"""
  Metrics files produced by vpxenc start their header lines with a B.
"""
if line[0:1] != "B" and len(string.split(line)) > 0:
return True
return False
def ParseMetricFile(file_name, metric_column):
"""
Convert a metrics file into a set of numbers.
This returns a sorted list of tuples with the first number
being from the first column (bitrate) and the second being from
metric_column (counting from 0).
"""
metric_set1 = set([])
metric_file = open(file_name, "r")
for line in metric_file:
metrics = string.split(line)
if HasMetrics(line):
if metric_column < len(metrics):
my_tuple = float(metrics[0]), float(metrics[metric_column])
else:
my_tuple = float(metrics[0]), 0
metric_set1.add(my_tuple)
metric_set1_sorted = sorted(metric_set1)
return metric_set1_sorted
def GraphBetter(metric_set1_sorted, metric_set2_sorted, use_set2_as_base):
"""
Search through the sorted metric set for metrics on either side of
the metric from file 1. Since both lists are sorted we really
should not have to search through the entire range, but these
are small lists."""
# pylint: disable=too-many-locals
total_bitrate_difference_ratio = 0.0
count = 0
# TODO(hta): Replace whole thing with a call to numpy.interp()
for bitrate, metric in metric_set1_sorted:
for i in range(len(metric_set2_sorted) - 1):
s2_bitrate_0, s2_metric_0 = metric_set2_sorted[i]
s2_bitrate_1, s2_metric_1 = metric_set2_sorted[i + 1]
# We have a point on either side of our metric range.
if s2_metric_0 < metric <= s2_metric_1:
# Calculate a slope.
if s2_metric_1 - s2_metric_0 != 0:
metric_slope = ((s2_bitrate_1 - s2_bitrate_0) /
(s2_metric_1 - s2_metric_0))
else:
metric_slope = 0
estimated_s2_bitrate = (s2_bitrate_0 + (metric - s2_metric_0) *
metric_slope)
# Calculate percentage difference as given by base.
if use_set2_as_base:
bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /
estimated_s2_bitrate)
else:
bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /
bitrate)
total_bitrate_difference_ratio += bitrate_difference_ratio
count += 1
break
# Calculate the average improvement between graphs.
if count != 0:
avg = total_bitrate_difference_ratio / count
else:
avg = 0.0
return avg
def DataSetBetter(metric_set1, metric_set2, method):
"""
Compares two data sets and determines which is better and by how
much.
The input metric set is sorted on bitrate.
The first set is the one to compare, the second set is the baseline.
"""
# Be fair to both graphs by testing all the points in each.
if method == 'avg':
avg_improvement = 50 * (
GraphBetter(metric_set1, metric_set2,
use_set2_as_base=True) -
GraphBetter(metric_set2, metric_set1,
use_set2_as_base=False))
elif method == 'dsnr':
avg_improvement = bdsnr(metric_set1, metric_set2)
else:
avg_improvement = bdrate(metric_set2, metric_set1)
return avg_improvement
def FileBetter(file_name_1, file_name_2, metric_column, method):
"""
Compares two data files and determines which is better and by how
much.
metric_column is the metric.
"""
# Store and parse our two files into lists of unique tuples.
# Read the two files, parsing out lines starting with bitrate.
metric_set1_sorted = ParseMetricFile(file_name_1, metric_column)
metric_set2_sorted = ParseMetricFile(file_name_2, metric_column)
return DataSetBetter(metric_set1_sorted, metric_set2_sorted, method)
def HtmlPage(page_template, page_title="", page_subtitle="",
filestable="", snrs="", formatters=""):
"""
  Creates an HTML page from the template and variables passed to it.
"""
# pylint: disable=too-many-arguments
# Build up a dictionary of the variables actually used in the template.
my_dict = {
'page_title': page_title,
'page_subtitle': page_subtitle,
'filestable_dpsnr': filestable['dsnr'],
'filestable_avg': filestable['avg'],
'filestable_drate': filestable['drate'],
'snrs': snrs,
'formatters': formatters
}
return FillForm(page_template, my_dict)
def ListOneTarget(codecs, rate, videofile, do_score, datatable,
score_function=None):
"""Extend a datatable with the info about one video file's scores."""
# pylint: disable=too-many-arguments
for codec_name in codecs:
# For testing:
# Allow for direct context injection rather than picking by name.
if isinstance(codec_name, basestring):
codec = pick_codec.PickCodec(codec_name)
my_optimizer = optimizer.Optimizer(codec, score_function=score_function)
else:
my_optimizer = codec_name
codec_name = my_optimizer.context.codec.name
best_encoding = my_optimizer.BestEncoding(rate, videofile)
if do_score and not best_encoding.Result():
best_encoding.Execute()
best_encoding.Store()
AddOneEncoding(codec_name, my_optimizer, best_encoding, videofile,
datatable)
def AddOneEncoding(codec_name, my_optimizer, this_encoding, videofile,
datatable):
assert this_encoding.Result()
# Ignore results that score less than zero.
if my_optimizer.Score(this_encoding) < 0.0:
return
# Datatable is a dictionary of codec name -> result sets.
# Each result set is an array containing result info.
# Each result info is a dictionary containing the
# ID of the configuration used, the
# target bitrate, the command line, the score and the result.
(datatable.setdefault(codec_name, {})
.setdefault(videofile.basename, [])
.append({'config_id': this_encoding.encoder.Hashname(),
'target_bitrate': this_encoding.bitrate,
'encode_command': this_encoding.EncodeCommandLine(),
'score': my_optimizer.Score(this_encoding),
'result': this_encoding.ResultWithoutFrameData()}))
def ListMpegResults(codecs, do_score, datatable, score_function=None):
"""List all scores for all tests in the MPEG test set for a set of codecs."""
# It is necessary to sort on target bitrate in order for graphs to display
# correctly.
for rate, filename in sorted(mpeg_settings.MpegFiles().AllFilesAndRates()):
videofile = encoder.Videofile(filename)
ListOneTarget(codecs, rate, videofile, do_score, datatable,
score_function)
def ListMpegSingleConfigResults(codecs, datatable, score_function=None):
encoder_list = {}
optimizer_list = {}
for codec_name in codecs:
codec = pick_codec.PickCodec(codec_name)
my_optimizer = optimizer.Optimizer(codec,
score_function=score_function, file_set=mpeg_settings.MpegFiles())
optimizer_list[codec_name] = my_optimizer
encoder_list[codec_name] = my_optimizer.BestOverallEncoder()
for rate, filename in sorted(mpeg_settings.MpegFiles().AllFilesAndRates()):
videofile = encoder.Videofile(filename)
for codec_name in codecs:
if encoder_list[codec_name]:
my_encoding = encoder_list[codec_name].Encoding(rate, videofile)
my_encoding.Recover()
AddOneEncoding(codec_name, optimizer_list[codec_name],
my_encoding, videofile, datatable)
def ExtractBitrateAndPsnr(datatable, codec, filename):
dataset = [(r['result']['bitrate'], r['result']['psnr'])
for r in datatable[codec][filename]]
return dataset
def BuildComparisonTable(datatable, metric, baseline_codec, other_codecs):
"""Builds a table of comparison data for this metric."""
# Find the metric files in the baseline codec.
videofile_name_list = datatable[baseline_codec].keys()
countoverall = {}
sumoverall = {}
for this_codec in other_codecs:
countoverall[this_codec] = 0
sumoverall[this_codec] = 0
# Data holds the data for the visualization, name given comes from
# gviz_api sample code.
data = []
for filename in videofile_name_list:
row = {'file': filename}
baseline_dataset = ExtractBitrateAndPsnr(datatable,
baseline_codec,
filename)
# Read the metric file from each of the directories in our list.
for this_codec in other_codecs:
# If there is a metric in this_codec, calculate the overall difference
# between it and the baseline codec's metric.
if (this_codec in datatable and filename in datatable[this_codec]
and filename in datatable[baseline_codec]):
this_dataset = ExtractBitrateAndPsnr(datatable,
this_codec,
filename)
overall = DataSetBetter(
baseline_dataset, this_dataset, metric)
if not math.isnan(overall):
# TODO(hta): figure out when DataSetBetter generates NaN
row[this_codec] = overall
sumoverall[this_codec] += overall
countoverall[this_codec] += 1
data.append(row)
# Add the overall numbers.
row = {"file": "OVERALL " + metric}
for this_codec in other_codecs:
if countoverall[this_codec]:
row[this_codec] = sumoverall[this_codec] / countoverall[this_codec]
data.append(row)
return data
def BuildGvizDataTable(datatable, metric, baseline_codec, other_codecs):
"""Builds a Gviz DataTable giving this metric for the files and codecs."""
description = {"file": ("string", "File")}
data = BuildComparisonTable(datatable, metric, baseline_codec, other_codecs)
for this_codec in other_codecs:
description[this_codec] = ("number", this_codec)
# Generate the gViz table
gviz_data_table = gviz_api.DataTable(description)
gviz_data_table.LoadData(data)
return gviz_data_table
def CrossPerformanceGvizTable(datatable, metric, codecs, criterion):
"""Build a square table of codecs and relative performance."""
# pylint: disable=too-many-locals
videofile_name_list = datatable[codecs[0]].keys()
description = {}
description['codec'] = ('string', 'Codec')
data = []
for codec in codecs:
description[codec] = ('string', codec)
for codec1 in codecs:
lineitem = {'codec': codec1}
for codec2 in codecs:
if codec1 != codec2:
count = 0
overall = 0.0
for filename in videofile_name_list:
if (codec1 in datatable and filename in datatable[codec1]
and codec2 in datatable and filename in datatable[codec2]):
overall += DataSetBetter(
ExtractBitrateAndPsnr(datatable, codec2, filename),
ExtractBitrateAndPsnr(datatable, codec1, filename), metric)
count += 1
if count > 0:
display = ('<a href=/results/show_result.html?' +
'codec1=%s&codec2=%s&criterion=%s>%5.2f</a>') % (
codec2, codec1, criterion, overall / count)
lineitem[codec2] = (overall / count, display)
data.append(lineitem)
gviz_data_table = gviz_api.DataTable(description)
gviz_data_table.LoadData(data)
return gviz_data_table
|
py | 1a38e286e79b035c7e70d89e06198032ee6c6b92 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# torchgan documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 6 13:31:50 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import time
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Mock Imports
autodoc_mock_imports = ["torch", "pillow", "torchvision", "tensorboardX", "visdom"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
# 'sphinx_gallery.gen_gallery'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# # Sphinx Gallery configuration
# sphinx_gallery_conf = {
# # path to your examples scripts
# 'examples_dirs': 'tutorial',
# # path where to save gallery generated examples
# 'gallery_dirs': 'tutorials',
# # which examples to execute
# 'filename_pattern': '/tutorial_',
# # intersphinx
# 'reference_url': {
# # The module you locally document uses None
# 'torchgan': None,
# },
# # binder
# 'binder': {
# # Required keys
# 'org': 'torchgan',
# 'repo': 'torchgan',
# 'url': 'https://mybinder.org', # Any URL of a binder server. Must be full URL (e.g. https://mybinder.org).
# 'branch': 'master', # Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
# 'dependencies': 'requirements.txt',
# 'use_jupyter_lab': True # Whether Binder links should start Jupyter Lab instead of the Jupyter Notebook interface.
# },
# 'show_memory': True,
# 'thumbnail_size': (300, 300),
# }
#
# # generate autosummary even if no references
# autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"torchgan"
copyright = u"2018-{}, Avik Pal & Aniket Das".format(time.strftime("%Y"))
author = "Avik Pal & Aniket Das"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = "v0.0.4"
# The full version, including alpha/beta/rc tags.
release = "v0.0.4"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"canonical_url": "",
"analytics_id": "",
"logo_only": False,
"display_version": True,
"prev_next_buttons_location": "bottom",
"style_external_links": True,
# Toc options
"collapse_navigation": False,
"sticky_navigation": False,
"navigation_depth": 4,
"includehidden": True,
"titles_only": False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': ['searchbox.html', 'globaltoc_custom.html'],
# 'using/windows': ['searchbox.html', 'windowssidebar.html'],
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "torchgandoc"
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def setup(app):
# app.add_javascript("custom.js")
app.add_stylesheet("theme_overrides.css")
else:
# Override default css to get a larger width for ReadTheDoc build
html_context = {
"css_files": [
"https://media.readthedocs.org/css/sphinx_rtd_theme.css",
"https://media.readthedocs.org/css/readthedocs-doc-embed.css",
"_static/theme_overrides.css",
]
}
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"torchgan.tex",
"torchgan Documentation",
"Avik Pal and Aniket Das",
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "torchgan", "torchgan Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"torchgan",
"torchgan Documentation",
author,
"torchgan",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"pytorch": ("https://pytorch.org/docs/stable", None),
}
|
py | 1a38e2c47f4eb732beba1030477333c2e23ebe01 | import tensorflow as tf
import numpy as np
from networks.select import select_G
from dataset import train_dataset_sim, test_dataset_sim
from loss import G_loss
from args import parse_args
import metasurface.solver as solver
import metasurface.conv as conv
import scipy.optimize as scp_opt
import os
import time
## Logging for TensorBoard
def log(img, gt_img, Phase_var, G, snr, vgg_model, summary_writer, step, params, args):
# Metasurface simulation
if args.psf_mode == 'SIM_PSF':
solver.set_wavelengths(params, params['lambda_base'])
psfs_debug, psfs_conv_forward = solver.get_psfs(Phase_var * args.bound_val, params, conv_mode=args.conv, aug_rotate=args.aug_rotate)
psfs_conv_deconv = psfs_conv_forward
if args.offset:
            # This allows for spatial sensitivity training
psfs_conv_forward = psfs_conv_forward[1:,:,:,:]
psfs_conv_deconv = psfs_conv_deconv[:-1,:,:,:]
assert(psfs_conv_forward.shape[0] == psfs_conv_deconv.shape[0])
elif args.psf_mode == 'REAL_PSF':
real_psf = np.load(args.real_psf)
real_psf = tf.constant(real_psf, dtype=tf.float32)
real_psf = tf.image.resize_with_crop_or_pad(real_psf, params['psf_width'], params['psf_width'])
real_psf = real_psf / tf.reduce_sum(real_psf, axis=(1,2), keepdims=True)
psfs_debug = real_psf
psfs_conv_forward = real_psf
psfs_conv_deconv = real_psf
else:
assert False, ("Unsupported PSF mode")
conv_image = params['conv_fn'](img, psfs_conv_forward)
sensor_img = solver.sensor_noise(conv_image, params)
_, G_img, G_debug = params['deconv_fn'](sensor_img, psfs_conv_deconv, snr, G, training=False)
# Losses
gt_img = tf.image.resize_with_crop_or_pad(gt_img, params['out_width'], params['out_width'])
G_Content_loss_val, G_loss_components, G_metrics = G_loss(G_img, gt_img, vgg_model, args)
# Save records to TensorBoard
with summary_writer.as_default():
# Images
tf.summary.image(name = 'Input/Input' , data=img, step=step)
tf.summary.image(name = 'Input/GT' , data=gt_img, step=step)
if args.offset:
num_patches = np.size(params['theta_base']) - 1
else:
num_patches = np.size(params['theta_base'])
for i in range(num_patches):
tf.summary.image(name = 'Output/Output_'+str(i), data=G_img[i:i+1,:,:,:], step=step)
tf.summary.image(name = 'Blur/Blur_'+str(i), data=conv_image[i:i+1,:,:,:], step=step)
tf.summary.image(name = 'Sensor/Sensor_'+str(i), data=sensor_img[i:i+1,:,:,:], step=step)
for j, debug in enumerate(G_debug):
tf.summary.image(name = 'Debug/Debug_'+str(j)+'_'+str(i), data=debug[i:i+1,:,:,:] , step=step)
# PSF
for i in range(np.size(params['theta_base'])):
psf_patch = psfs_debug[i:i+1,:,:,:]
tf.summary.image(name='PSF/PSF_'+str(i),
data=psf_patch / tf.reduce_max(psf_patch), step=step)
for l in range(np.size(params['lambda_base'])):
psf_patch = psfs_debug[i:i+1,:,:,l:l+1]
tf.summary.image(name='PSF_'+str(params['lambda_base'][l])+'/PSF_'+str(i),
data=psf_patch / tf.reduce_max(psf_patch), step=step)
for i in range(Phase_var.shape[0]):
tf.summary.scalar(name = 'Phase/Phase_'+str(i), data=Phase_var[i], step=step)
# Metrics
tf.summary.scalar(name = 'metrics/G_PSNR', data = G_metrics['PSNR'], step=step)
tf.summary.scalar(name = 'metrics/G_SSIM', data = G_metrics['SSIM'], step=step)
tf.summary.scalar(name = 'snr', data = snr, step=step)
# Content losses
tf.summary.scalar(name = 'loss/G_Content_loss', data = G_Content_loss_val, step=step)
tf.summary.scalar(name = 'loss/G_Norm_loss' , data = G_loss_components['Norm'], step=step)
tf.summary.scalar(name = 'loss/G_P_loss' , data = G_loss_components['P'], step=step)
tf.summary.scalar(name = 'loss/G_Spatial_loss', data = G_loss_components['Spatial'], step=step)
## Optimization Step
def train_step(mode, img, gt_img, Phase_var, Phase_optimizer, G, G_optimizer, snr, vgg_model, params, args):
with tf.GradientTape() as G_tape:
# Metasurface simulation
if args.psf_mode == 'SIM_PSF':
solver.set_wavelengths(params, params['lambda_base'])
psfs_debug, psfs_conv_forward = solver.get_psfs(Phase_var * args.bound_val, params, conv_mode=args.conv, aug_rotate=args.aug_rotate)
psfs_conv_deconv = psfs_conv_forward
if args.offset:
                # This allows for spatial sensitivity training
psfs_conv_forward = psfs_conv_forward[1:,:,:,:]
psfs_conv_deconv = psfs_conv_deconv[:-1,:,:,:]
assert(psfs_conv_forward.shape[0] == psfs_conv_deconv.shape[0])
elif args.psf_mode == 'REAL_PSF':
real_psf = np.load(args.real_psf)
real_psf = tf.constant(real_psf, dtype=tf.float32)
real_psf = tf.image.resize_with_crop_or_pad(real_psf, params['psf_width'], params['psf_width'])
real_psf = real_psf / tf.reduce_sum(real_psf, axis=(1,2), keepdims=True)
psfs_debug = real_psf
psfs_conv_forward = real_psf
psfs_conv_deconv = real_psf
else:
assert False, ("Unsupported PSF mode")
conv_image = params['conv_fn'](img, psfs_conv_forward)
sensor_img = solver.sensor_noise(conv_image, params)
_, G_img, _ = params['deconv_fn'](sensor_img, psfs_conv_deconv, snr, G, training=True)
# Losses
gt_img = tf.image.resize_with_crop_or_pad(gt_img, params['out_width'], params['out_width'])
G_loss_val, G_loss_components, G_metrics = G_loss(G_img, gt_img, vgg_model, args)
# Apply gradients
if mode == 'Phase':
Phase_gradients = G_tape.gradient(G_loss_val, Phase_var)
Phase_optimizer.apply_gradients([(Phase_gradients, Phase_var)])
Phase_var.assign(tf.clip_by_value(Phase_var, -1.0, 1.0)) # Clipped to normalized phase range
elif mode == 'G':
G_vars = G.trainable_variables
if args.snr_opt:
G_vars.append(snr)
G_gradients = G_tape.gradient(G_loss_val, G_vars)
G_optimizer.apply_gradients(zip(G_gradients, G_vars))
if args.snr_opt:
snr.assign(tf.clip_by_value(snr, 3.0, 4.0))
else:
assert False, "Non-existant training mode"
## Training loop
def train(args):
## Metasurface
params = solver.initialize_params(args)
if args.metasurface == 'random':
phase_initial = np.random.uniform(low = -args.bound_val, high = args.bound_val, size = params['num_coeffs'])
elif args.metasurface == 'zeros':
phase_initial = np.zeros(params['num_coeffs'], dtype=np.float32)
elif args.metasurface == 'single':
phase_initial = np.array([-np.pi * (params['Lx'] * params['pixelsX'] / 2) ** 2 / params['wavelength_nominal'] / params['f'], 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
elif args.metasurface == 'neural':
# Best parameters with neural optimization
phase_initial = np.array([-0.3494864 , -0.00324192, -1. , -1. ,
-1. , -1. , -1. , -1. ], dtype=np.float32)
phase_initial = phase_initial * args.bound_val # <-- should be 1000
assert(args.bound_val == 1000)
else:
if args.metasurface == 'log_asphere':
phase_log = solver.log_asphere_phase(args.s1, args.s2, params)
elif args.metasurface == 'shifted_axicon':
phase_log = solver.shifted_axicon_phase(args.s1, args.s2, params)
elif args.metasurface == 'squbic':
phase_log = solver.squbic_phase(args.A, params)
elif args.metasurface == 'hyperboidal':
phase_log = solver.hyperboidal_phase(args.target_wavelength, params)
elif args.metasurface == 'cubic':
phase_log = solver.cubic_phase(args.alpha, args.target_wavelength, params) # Only for direct inference
else:
assert False, ("Unsupported metasurface mode")
params['general_phase'] = phase_log # For direct phase inference
if args.use_general_phase:
assert(args.Phase_iters == 0)
# For optimization
lb = (params['pixelsX'] - params['pixels_aperture']) // 2
ub = (params['pixelsX'] + params['pixels_aperture']) // 2
x = params['x_mesh'][lb : ub, 0] / (0.5 * params['pixels_aperture'] * params['Lx'])
phase_slice = phase_log[0, lb : ub, params['pixelsX'] // 2]
p_fit, _ = scp_opt.curve_fit(params['phase_func'], x, phase_slice, bounds=(-args.bound_val, args.bound_val))
phase_initial = p_fit
print('Initial Phase: {}'.format(phase_initial), flush=True)
print('Image width: {}'.format(params['image_width']), flush=True)
# Normalize the phases within the bounds
phase_initial = phase_initial / args.bound_val
Phase_var = tf.Variable(phase_initial, dtype = tf.float32)
Phase_optimizer = tf.keras.optimizers.Adam(args.Phase_lr, beta_1=args.Phase_beta1)
# SNR term for deconvolution algorithm
snr = tf.Variable(args.snr_init, dtype=tf.float32)
# Do not optimize phase during finetuning
if args.psf_mode == 'REAL_PSF':
assert(args.Phase_iters == 0)
# Convolution mode
if args.offset:
assert(len(args.batch_weights) == len(args.theta_base) - 1)
else:
assert(len(args.batch_weights) == len(args.theta_base))
params['conv_fn'] = conv.convolution_tf(params, args)
params['deconv_fn'] = conv.deconvolution_tf(params, args)
## Network architectures
G = select_G(params, args)
G_optimizer = tf.keras.optimizers.Adam(args.G_lr, beta_1=args.G_beta1)
## Construct vgg for perceptual loss
if not args.P_loss_weight == 0:
vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
vgg_layers = [vgg.get_layer(name).output for name in args.vgg_layers.split(',')]
vgg_model = tf.keras.Model(inputs=vgg.input, outputs=vgg_layers)
vgg_model.trainable = False
else:
vgg_model = None
## Saving the model
checkpoint = tf.train.Checkpoint(Phase_optimizer=Phase_optimizer, Phase_var=Phase_var, G_optimizer=G_optimizer, G=G, snr=snr)
max_to_keep = args.max_to_keep
if args.max_to_keep == 0:
max_to_keep = None
manager = tf.train.CheckpointManager(checkpoint, directory=args.save_dir, max_to_keep=max_to_keep)
## Loading pre-trained model if exists
if not args.ckpt_dir == None:
status = checkpoint.restore(tf.train.latest_checkpoint(args.ckpt_dir, latest_filename=None))
status.expect_partial() # Silence warnings
#status.assert_existing_objects_matched() # Only partial load for networks (we don't load the optimizers)
#status.assert_consumed()
## Create summary writer for TensorBoard
summary_writer = tf.summary.create_file_writer(args.save_dir)
## Dataset
train_ds = iter(train_dataset_sim(params['out_width'], params['load_width'], args))
test_ds = list(test_dataset_sim(params['out_width'], params['load_width'], args).take(1))
## Do training
for step in range(args.steps):
start = time.time()
if step % args.save_freq == 0:
print('Saving', flush=True)
manager.save()
if step % args.log_freq == 0:
print('Logging', flush=True)
test_batch = test_ds[0]
img = test_batch[0]
gt_img = test_batch[1]
log(img, gt_img, Phase_var, G, snr, vgg_model, summary_writer, step, params, args)
for _ in range(args.Phase_iters):
img_batch = next(train_ds)
img = img_batch[0]
gt_img = img_batch[1]
train_step('Phase', img, gt_img, Phase_var, Phase_optimizer, G, G_optimizer, snr, vgg_model, params, args)
for _ in range(args.G_iters):
img_batch = next(train_ds)
img = img_batch[0]
gt_img = img_batch[1]
train_step('G', img, gt_img, Phase_var, Phase_optimizer, G, G_optimizer, snr, vgg_model, params, args)
print("Step time: {}\n".format(time.time() - start), flush=True)
## Entry point
def main():
args = parse_args()
train(args)
if __name__ == '__main__':
main()
|
py | 1a38e69e972423f7ad5aa4443e697d923a97d429 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
private_link_service_connection_state: pulumi.Input['PrivateLinkServiceConnectionStateArgs'],
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
identity: Optional[pulumi.Input['IdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['SkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input['PrivateLinkServiceConnectionStateArgs'] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
:param pulumi.Input['IdentityArgs'] identity: The identity of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace
:param pulumi.Input['SkuArgs'] sku: The sku of the workspace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
"""
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Input['PrivateLinkServiceConnectionStateArgs']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: pulumi.Input['PrivateLinkServiceConnectionStateArgs']):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group in which workspace is located.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
Name of Azure Machine Learning workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityArgs']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection associated with the workspace
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The Private Endpoint Connection resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The Private Endpoint Connection resource.
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
if private_link_service_connection_state is None and not opts.urn:
raise TypeError("Missing required property 'private_link_service_connection_state'")
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210401:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200218preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200218preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200401:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200401:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200501preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200801:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200801:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20210101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:machinelearningservices/v20210701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210701:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:machinelearningservices/v20210401:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
|
py | 1a38e7d1f5b051480bd2a51470044c2d1994ab8c | import os
import numpy as np
from PIL import Image
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class CityscapesSegDataset(SegDataset):
"""
Cityscapes semantic segmentation dataset.
Parameters:
----------
root : str
Path to a folder with `leftImg8bit` and `gtFine` subfolders.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(CityscapesSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
image_dir_path = os.path.join(root, "leftImg8bit")
mask_dir_path = os.path.join(root, "gtFine")
assert os.path.exists(image_dir_path) and os.path.exists(mask_dir_path), "Please prepare dataset"
mode_dir_name = "train" if mode == "train" else "val"
image_dir_path = os.path.join(image_dir_path, mode_dir_name)
# mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)
self.images = []
self.masks = []
for image_subdir_path, _, image_file_names in os.walk(image_dir_path):
for image_file_name in image_file_names:
if image_file_name.endswith(".png"):
image_file_path = os.path.join(image_subdir_path, image_file_name)
mask_file_name = image_file_name.replace("leftImg8bit", "gtFine_labelIds")
mask_subdir_path = image_subdir_path.replace("leftImg8bit", "gtFine")
mask_file_path = os.path.join(mask_subdir_path, mask_file_name)
if os.path.isfile(mask_file_path):
self.images.append(image_file_path)
self.masks.append(mask_file_path)
else:
print("Cannot find the mask: {}".format(mask_file_path))
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of: {}\n".format(image_dir_path))
self.add_getter('img', self._get_image)
self.add_getter('label', self._get_label)
def _get_image(self, i):
image = Image.open(self.images[i]).convert("RGB")
assert (self.mode in ("test", "demo"))
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image
def _get_label(self, i):
if self.mode == "demo":
return os.path.basename(self.images[i])
assert (self.mode == "test")
mask = Image.open(self.masks[i])
mask = self._mask_transform(mask)
return mask
classes = 19
vague_idx = 19
use_vague = True
background_idx = -1
ignore_bg = False
_key = np.array([-1, -1, -1, -1, -1, -1,
-1, -1, 0, 1, -1, -1,
2, 3, 4, -1, -1, -1,
5, -1, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15,
-1, -1, 16, 17, 18])
_mapping = np.array(range(-1, len(_key) - 1)).astype(np.int32)
@staticmethod
def _class_to_index(mask):
values = np.unique(mask)
for value in values:
assert(value in CityscapesSegDataset._mapping)
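        # np.digitize locates each raw Cityscapes label id within _mapping (ids -1..33);
        # indexing _key with that position yields the 19-class training id, or -1 for "ignore".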
index = np.digitize(mask.ravel(), CityscapesSegDataset._mapping, right=True)
return CityscapesSegDataset._key[index].reshape(mask.shape)
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
np_mask = CityscapesSegDataset._class_to_index(np_mask)
np_mask[np_mask == -1] = CityscapesSegDataset.vague_idx
return np_mask
def __len__(self):
return len(self.images)
class CityscapesMetaInfo(VOCMetaInfo):
def __init__(self):
super(CityscapesMetaInfo, self).__init__()
self.label = "Cityscapes"
self.short_label = "voc"
self.root_dir_name = "cityscapes"
self.dataset_class = CityscapesSegDataset
self.num_classes = CityscapesSegDataset.classes
self.test_metric_extra_kwargs = [
{"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"macro_average": False},
{"num_classes": CityscapesSegDataset.classes,
"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"bg_idx": CityscapesSegDataset.background_idx,
"ignore_bg": CityscapesSegDataset.ignore_bg,
"macro_average": False}]
self.test_net_extra_kwargs = self.net_extra_kwargs
|
py | 1a38e85532dda5d4074a3350badc74ae63f26f03 | """
Django settings for contacts project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'de=llqstba$l4tip@z==gd*4p-(ll9+ozq78dz8=6^j6mxq^a5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'contacts.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'contacts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
py | 1a38e8e624b53d34785ca2f3647617204ada1885 | from flask import Flask, jsonify  # new code: load Flask and jsonify
import pandas as pd
app = Flask(__name__)  # new code
@app.route("/")  # new code: requests to "/" are handled by root()
def root():
return app.send_static_file("visual.html")
@app.route("/getData1")
def getData1():
df = pd.read_csv("./out/PeopleInSubwayTime.csv")
data = [df.iloc[:, 0].tolist(), df.iloc[:, 1].tolist()]
print(data)
return jsonify(data)
@app.route("/getData2")
def getData2():
df = pd.read_csv("./out/PeopleInSubwayCount.csv")
data = [df.iloc[:, 0].tolist(), df.iloc[:, 1].tolist()]
print(data)
return jsonify(data)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80, debug=True)
# eof
|
py | 1a38ea196a1a53725d37f2d886ed6605914be4a7 | # encoding: utf-8
import pyparsing as pyp
import re
def to_obj(result):
'''Convert nested ParseResults structure to list / dict.
Args:
result (ParseResults) : pyparsing result
Returns:
list / dict containing results
'''
d = result.asDict()
if d:
for k in d:
if isinstance(d[k], pyp.ParseResults):
d[k] = to_obj(d[k])
return d
l = result.asList()
for idx, v in enumerate(l):
if isinstance(v, pyp.ParseResults):
l[idx] = to_obj(v)
return l
def scan(pattern, string):
'''Scan a string for repeated occurrences of a pattern.
Args:
pattern (pyparsing pattern) : pattern to be applied
string (str) : text to be parsed
Returns:
list of matches as list / dict
'''
return [to_obj(match[0]) for match in
pattern.scanString(string)]
# ParseAction functions
def joiner(delim):
return lambda tokens: delim.join(tokens)
def parse_authors_factory(author_splitter, name_splitter):
'''Create a function for splitting author strings.
Args:
author_splitter (str) : pattern for splitting authors
name_splitter (str) : pattern for splitting names w/in an author
Returns:
author-splitter function
'''
def parse_authors(tokens):
authors = []
# Note: Since this action is normally chained after
# a joiner() action, only consider the 0th token
for token in re.split(author_splitter, tokens[0]):
if not token:
continue
token_split = re.split(name_splitter, token)
author = {}
author['family'] = token_split[0]
if len(token_split) > 1:
author['given'] = token_split[1]
authors.append(author)
return authors
return parse_authors
# Character sets
dash_chars = u'-–'
allowed_chars = u',;:\'"’&?!()'
# Elementary patterns
dashes = pyp.Word(dash_chars)
etal = pyp.Combine('et al' + pyp.ZeroOrMore('.'))
number = pyp.Word(pyp.nums)
date = '(' + number.setResultsName('date') + ')' + pyp.Optional('.')
words_neglook = ~date + ~number + ~etal + ~pyp.Literal('http') + ~pyp.Literal('doi')
word = pyp.Word(pyp.alphanums + dash_chars + allowed_chars)
words = pyp.OneOrMore(words_neglook + word).\
setParseAction(joiner(' '))
word_journal = pyp.Word(pyp.alphanums + dash_chars + allowed_chars + '.')
words_journal = pyp.OneOrMore(words_neglook + word_journal).\
setParseAction(joiner(' '))
# Meta-data patterns
# Note: Remember to copy words pattern to avoid
# changing other patterns
authors = pyp.Group(
words_journal.copy().\
addParseAction(parse_authors_factory(',', '\s'))
).setResultsName('author') + \
pyp.Optional(etal)
title = words.\
setResultsName('title')
journal = words_journal.\
setParseAction(joiner(' ')).\
setResultsName('journal-title')
volume = pyp.Optional(
number.\
setResultsName('volume') + \
pyp.Word(',:')
)
page_range = number + pyp.Suppress(dashes) + number
page_plos = pyp.Combine('e' + number)
pages = pyp.Optional(pyp.Group(page_range | page_plos).\
setResultsName('pages'))
doi = pyp.Optional(
pyp.Suppress(
pyp.Optional('doi:') + \
pyp.Optional('http://dx.doi.org/')
) + \
pyp.Regex(r'\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])\S)+)\b')
).setResultsName('doi')
reference = authors + \
date + \
title + \
'.' + \
journal + \
volume + \
pages + \
pyp.Optional('.') + \
doi
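# Illustrative usage (added as a sketch; the sample citation string below is an assumption,
# and how much of it matches depends on the grammar above -- `scan` simply returns one
# dict per span that the `reference` pattern recognises).
if __name__ == "__main__":
    sample = ("Smith J, Doe A (2014). A study of reference parsing. "
              "Journal of Examples 12, 100-110. doi:10.1000/example.123")
    for match in scan(reference, sample):
        print(match)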
|
py | 1a38ebfae09aa6c407c088d5c49c3837db4200a3 | #!/usr/bin/env python
import pika
import time
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='rabbit'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [x] Received %r" % body)
time.sleep(body.count(b'.'))
print(" [x] Done")
ch.basic_ack(delivery_tag = method.delivery_tag)
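# Fair dispatch: with prefetch_count=1 the broker will not push a new message to this
# worker until the previous one has been acknowledged.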
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback,
queue='task_queue')
channel.start_consuming()
|
py | 1a38ee7d5acdf95dc79014ad1b14880ad33549cb | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_cave_wall_damprock_style_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 1a38eef37bdeb7cc5246430583b6766844f1e9b5 | """
Plugins resource control over the API.
"""
import logging
from galaxy import exceptions
from galaxy.managers import hdas, histories
from galaxy.web import expose_api
from galaxy.webapps.base.controller import BaseAPIController
log = logging.getLogger(__name__)
class PluginsController(BaseAPIController):
"""
RESTful controller for interactions with plugins.
"""
def __init__(self, app):
super().__init__(app)
self.hda_manager = hdas.HDAManager(app)
self.history_manager = histories.HistoryManager(app)
@expose_api
def index(self, trans, **kwargs):
"""
GET /api/plugins:
"""
registry = self._get_registry(trans)
dataset_id = kwargs.get("dataset_id")
if dataset_id is not None:
hda = self.hda_manager.get_accessible(self.decode_id(dataset_id), trans.user)
return registry.get_visualizations(trans, hda)
else:
return registry.get_plugins()
@expose_api
def show(self, trans, id, **kwargs):
"""
GET /api/plugins/{id}:
"""
registry = self._get_registry(trans)
history_id = kwargs.get("history_id")
if history_id is not None:
history = self.history_manager.get_owned(trans.security.decode_id(history_id), trans.user, current_history=trans.history)
result = {"hdas": []}
for hda in history.contents_iter(types=["dataset"], deleted=False, visible=True):
if registry.get_visualization(trans, id, hda):
result["hdas"].append({
"id": trans.security.encode_id(hda.id),
"name": hda.name
})
else:
result = registry.get_plugin(id).to_dict()
return result
def _get_registry(self, trans):
if not trans.app.visualizations_registry:
raise exceptions.MessageException("The visualization registry has not been configured.")
return trans.app.visualizations_registry
|
py | 1a38ef143285e1d567e4b71cc5e70738907312cd | # Generated by Django 3.1.2 on 2020-11-02 20:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField()),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a38f06863e5c7bd7dda1dd1f0be133b50791eeb | #!/usr/bin/env python
from unittest import TestCase
from fundamentals.lists.linked_list import LL
class TestLL(TestCase):
def test_rev_ll(self):
ll = LL()
ll.add(3).add(4).add(5).add(6)
self.assertEquals("3, 4, 5, 6", ll.head.pretty_walk())
self.assertEquals("6, 5, 4, 3", ll.recursive_reverse_ll().pretty_walk())
self.assertEquals("6, 5, 4, 3", ll.iterative_reverse_ll().pretty_walk())
def test_del(self):
ll = LL()
ll.add(3).add(4).add(5).add(6)
self.assertEquals("3, 4, 5, 6", ll.head.pretty_walk())
ll.delkey(3)
self.assertEquals("4, 5, 6", ll.head.pretty_walk())
ll.delkey(10)
self.assertEquals("4, 5, 6", ll.head.pretty_walk())
ll.delkey(5)
self.assertEquals("4, 6", ll.head.pretty_walk())
ll.delkey(6)
self.assertEquals("4", ll.head.pretty_walk())
|
py | 1a38f1c75b440b8747c8fd95f4bc565694d05d7f | from setuptools import setup, find_packages
import d2l
requirements = [
'jupyter==1.0.0',
'numpy==1.21.5',
'matplotlib==3.5.1',
'requests==2.25.1',
'pandas==1.2.4'
]
setup(
name='d2l',
version=d2l.__version__,
python_requires='>=3.5',
author='D2L Developers',
author_email='[email protected]',
url='https://d2l.ai',
description='Dive into Deep Learning',
license='MIT-0',
packages=find_packages(),
zip_safe=True,
install_requires=requirements,
)
|
py | 1a38f28642900798ac7cd4e9a22d72b46246a40d | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from .render import RenderManager
|
py | 1a38f3adddf4513a1095b6551c7944acf8defeb9 | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from tempest import test
class IdentityV3UsersTest(base.BaseIdentityV3Test):
@classmethod
def resource_setup(cls):
super(IdentityV3UsersTest, cls).resource_setup()
cls.creds = cls.os.credentials
cls.user_id = cls.creds.user_id
cls.username = cls.creds.username
cls.password = cls.creds.password
@test.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
def test_user_update_own_password(self):
def _restore_password(client, user_id, old_pass, new_pass):
# Reset auth to get a new token with the new password
client.auth_provider.clear_auth()
client.auth_provider.credentials.password = new_pass
client.update_user_password(user_id, password=old_pass,
original_password=new_pass)
# Reset auth again to verify the password restore does work.
# Clear auth restores the original credentials and deletes
# cached auth data
client.auth_provider.clear_auth()
# NOTE(lbragstad): Fernet tokens are not subsecond aware and
# Keystone should only be precise to the second. Sleep to ensure we
# are passing the second boundary before attempting to
# authenticate.
time.sleep(1)
client.auth_provider.set_auth()
old_pass = self.creds.password
new_pass = data_utils.rand_password()
user_id = self.creds.user_id
# to change password back. important for allow_tenant_isolation = false
self.addCleanup(_restore_password, self.non_admin_users_client,
user_id, old_pass=old_pass, new_pass=new_pass)
# user updates own password
self.non_admin_users_client.update_user_password(
user_id, password=new_pass, original_password=old_pass)
# NOTE(morganfainberg): Fernet tokens are not subsecond aware and
# Keystone should only be precise to the second. Sleep to ensure
# we are passing the second boundary.
time.sleep(1)
# check authorization with new password
self.non_admin_token.auth(user_id=self.user_id, password=new_pass)
# authorize with old token should lead to IdentityError (404 code)
self.assertRaises(exceptions.IdentityError,
self.non_admin_token.auth,
token=self.non_admin_client.token)
# authorize with old password should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
self.non_admin_token.auth,
user_id=self.user_id,
password=old_pass)
|
py | 1a38f4f74c828c18e83364c4ba71da8147ae9b2e | # Copyright (c) 2020, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import (boolean, integer)
VALID_SIGNIN_ALGORITHM = ('SHA256WITHECDSA', 'SHA256WITHRSA',
'SHA384WITHECDSA', 'SHA384WITHRSA',
'SHA512WITHECDSA', 'SHA512WITHRSA')
VALID_VALIDITY_TYPE = ('ABSOLUTE', 'DAYS', 'END_DATE',
'MONTHS', 'YEARS')
VALID_KEY_ALGORITHM = ('EC_prime256v1', 'EC_secp384r1',
'RSA_2048', 'RSA_4096')
VALID_CERTIFICATEAUTHORITY_TYPE = ('ROOT', 'SUBORDINATE')
def validate_validity_type(validity_type):
"""Certificate Validity Type validation rule."""
if validity_type not in VALID_VALIDITY_TYPE:
raise ValueError("Certificate Validity Type must be one of: %s" %
", ".join(VALID_VALIDITY_TYPE))
return validity_type
def validate_signing_algorithm(signing_algorithm):
"""Certificate SigningAlgorithm validation rule."""
if signing_algorithm not in VALID_SIGNIN_ALGORITHM:
raise ValueError("Certificate SigningAlgorithm must be one of: %s" %
", ".join(VALID_SIGNIN_ALGORITHM))
return signing_algorithm
def validate_key_algorithm(key_algorithm):
"""CertificateAuthority KeyAlgorithm validation rule."""
if key_algorithm not in VALID_KEY_ALGORITHM:
raise ValueError("CertificateAuthority KeyAlgorithm must be one of: %s" % # NOQA
", ".join(VALID_KEY_ALGORITHM))
return key_algorithm
def validate_certificateauthority_type(certificateauthority_type):
"""CertificateAuthority Type validation rule."""
if certificateauthority_type not in VALID_CERTIFICATEAUTHORITY_TYPE:
raise ValueError("CertificateAuthority Type must be one of: %s" %
", ".join(VALID_CERTIFICATEAUTHORITY_TYPE))
return certificateauthority_type
class Validity(AWSProperty):
props = {
'Type': (validate_validity_type, True),
'Value': (integer, True),
}
class Certificate(AWSObject):
resource_type = "AWS::ACMPCA::Certificate"
props = {
'CertificateAuthorityArn': (basestring, True),
'CertificateSigningRequest': (basestring, True),
'SigningAlgorithm': (validate_signing_algorithm, True),
'TemplateArn': (basestring, False),
'Validity': (Validity, True),
}
class CertificateAuthorityActivation(AWSObject):
resource_type = "AWS::ACMPCA::CertificateAuthorityActivation"
props = {
'Certificate': (basestring, True),
'CertificateAuthorityArn': (basestring, True),
'CertificateChain': (basestring, False),
'Status': (basestring, False),
}
class CrlConfiguration(AWSProperty):
props = {
'CustomCname': (basestring, False),
'Enabled': (boolean, False),
'ExpirationInDays': (integer, False),
'S3BucketName': (basestring, False),
}
class RevocationConfiguration(AWSProperty):
props = {
'CrlConfiguration': (CrlConfiguration, False)
}
class Subject(AWSProperty):
props = {
'CommonName': (basestring, False),
'Country': (basestring, False),
'DistinguishedNameQualifier': (basestring, False),
'GenerationQualifier': (basestring, False),
'GivenName': (basestring, False),
'Initials': (basestring, False),
'Locality': (basestring, False),
'Organization': (basestring, False),
'OrganizationalUnit': (basestring, False),
'Pseudonym': (basestring, False),
'SerialNumber': (basestring, False),
'State': (basestring, False),
'Surname': (basestring, False),
'Title': (basestring, False),
}
class CertificateAuthority(AWSObject):
resource_type = "AWS::ACMPCA::CertificateAuthority"
props = {
'KeyAlgorithm': (validate_key_algorithm, True),
'RevocationConfiguration': (RevocationConfiguration, False),
'SigningAlgorithm': (validate_signing_algorithm, True),
'Subject': (Subject, True),
'Tags': (Tags, False),
'Type': (validate_certificateauthority_type, True),
}
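# Illustrative usage (added as a sketch, not part of the original module; the resource
# title and subject values are assumptions). A CertificateAuthority behaves like any other
# troposphere resource: construct it, add it to a Template, and render the template.
if __name__ == "__main__":
    from troposphere import Template

    template = Template()
    template.add_resource(CertificateAuthority(
        "ExampleRootCA",
        KeyAlgorithm="RSA_2048",
        SigningAlgorithm="SHA256WITHRSA",
        Type="ROOT",
        Subject=Subject(CommonName="example.com", Organization="Example Org"),
    ))
    print(template.to_json())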
|
py | 1a38f53dfb8b112c49a6d9d7569b5e9d457784ac | # -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.data_stores.main.client_ips import LAST_SEEN_GRANULARITY
from synapse.storage.database import Database
from synapse.util.caches.descriptors import Cache
from ._base import BaseSlavedStore
class SlavedClientIpStore(BaseSlavedStore):
def __init__(self, database: Database, db_conn, hs):
super(SlavedClientIpStore, self).__init__(database, db_conn, hs)
self.client_ip_last_seen = Cache(
name="client_ip_last_seen", keylen=4, max_entries=50000
)
def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
now = int(self._clock.time_msec())
key = (user_id, access_token, ip)
try:
last_seen = self.client_ip_last_seen.get(key)
except KeyError:
last_seen = None
# Rate-limited inserts
if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
return
self.client_ip_last_seen.prefill(key, now)
self.hs.get_tcp_replication().send_user_ip(
user_id, access_token, ip, user_agent, device_id, now
)
|
py | 1a38f569aa97b28a01d9ec1222dc621f4853c754 | import numpy as np
import math
import cv2
import numpy.random as random
class Compose(object):
"""Composes several augmentations together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
>>> augmentations.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, pts=None):
for t in self.transforms:
img, pts = t(img, pts)
return img, pts
class RandomMirror(object):
def __init__(self):
pass
def __call__(self, image, polygons=None):
if np.random.randint(2):
image = np.ascontiguousarray(image[:, ::-1])
_, width, _ = image.shape
for polygon in polygons:
polygon.points[:, 0] = width - polygon.points[:, 0]
return image, polygons
class AugmentColor(object):
def __init__(self):
self.U = np.array([[-0.56543481, 0.71983482, 0.40240142],
[-0.5989477, -0.02304967, -0.80036049],
[-0.56694071, -0.6935729, 0.44423429]], dtype=np.float32)
self.EV = np.array([1.65513492, 0.48450358, 0.1565086], dtype=np.float32)
self.sigma = 0.1
self.color_vec = None
def __call__(self, img, polygons=None):
color_vec = self.color_vec
if self.color_vec is None:
if not self.sigma > 0.0:
color_vec = np.zeros(3, dtype=np.float32)
else:
color_vec = np.random.normal(0.0, self.sigma, 3)
alpha = color_vec.astype(np.float32) * self.EV
noise = np.dot(self.U, alpha.T) * 255
return np.clip(img + noise[np.newaxis, np.newaxis, :], 0, 255), polygons
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
# expects float image
def __call__(self, image, polygons=None):
if random.randint(2):
alpha = random.uniform(self.lower, self.upper)
image *= alpha
return np.clip(image, 0, 255), polygons
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, polygons=None):
image = image.astype(np.float32)
if random.randint(2):
delta = random.uniform(-self.delta, self.delta)
image += delta
return np.clip(image, 0, 255), polygons
class Rotate(object):
def __init__(self, up=30):
self.up = up
    def rotate(self, center, pt, theta):  # 2D rotation of points about a center; y is negated because image coordinates grow downwards
xr, yr = center
yr = -yr
x, y = pt[:, 0], pt[:, 1]
y = -y
theta = theta / 360 * 2 * math.pi
cos = math.cos(theta)
sin = math.sin(theta)
_x = xr + (x - xr) * cos - (y - yr) * sin
_y = yr + (x - xr) * sin + (y - yr) * cos
return _x, -_y
def __call__(self, img, polygons=None):
if np.random.randint(2):
return img, polygons
        angle = np.random.uniform(-self.up, self.up)
rows, cols = img.shape[0:2]
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1.0)
img = cv2.warpAffine(img, M, (cols, rows), borderValue=[0, 0, 0])
center = cols / 2.0, rows / 2.0
if polygons is not None:
for polygon in polygons:
x, y = self.rotate(center, polygon.points, angle)
pts = np.vstack([x, y]).T
polygon.points = pts
return img, polygons
class SquarePadding(object):
def __call__(self, image, pts=None):
H, W, _ = image.shape
if H == W:
return image, pts
padding_size = max(H, W)
expand_image = np.zeros((padding_size, padding_size, 3), dtype=image.dtype)
if H > W:
y0, x0 = 0, (H - W) // 2
else:
y0, x0 = (W - H) // 2, 0
if pts is not None:
pts[:, 0] += x0
pts[:, 1] += y0
expand_image[y0:y0+H, x0:x0+W] = image
image = expand_image
return image, pts
class Padding(object):
def __init__(self, fill=0):
self.fill = fill
def __call__(self, image, polygons=None):
if np.random.randint(2):
return image, polygons
        try:
            height, width, depth = image.shape
        except ValueError:
            # grayscale images have a 2-dimensional shape
            height, width = image.shape
            depth = 1
ratio = np.random.uniform(1, 2)
left = np.random.uniform(0, width * ratio - width)
top = np.random.uniform(0, height * ratio - height)
expand_image = np.zeros(
(int(height * ratio), int(width * ratio), depth),
dtype=image.dtype)
expand_image[:, :, :] = self.fill
expand_image[int(top):int(top + height),
int(left):int(left + width)] = image
image = expand_image
if polygons is not None:
for polygon in polygons:
polygon.points[:, 0] = polygon.points[:, 0] + left
polygon.points[:, 1] = polygon.points[:, 1] + top
return image, polygons
class RandomResizedCrop(object):
def __init__(self, size, scale=(0.3, 1.0), ratio=(3. / 4., 4. / 3.)):
self.size = (size, size)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.shape[0] * img.shape[1]
target_area = np.random.uniform(*scale) * area
aspect_ratio = np.random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.random() < 0.5:
w, h = h, w
if h < img.shape[0] and w < img.shape[1]:
j = np.random.randint(0, img.shape[1] - w)
i = np.random.randint(0, img.shape[0] - h)
return i, j, h, w
# Fallback
w = min(img.shape[0], img.shape[1])
i = (img.shape[0] - w) // 2
j = (img.shape[1] - w) // 2
return i, j, w, w
def __call__(self, image, pts=None):
i, j, h, w = self.get_params(image, self.scale, self.ratio)
cropped = image[i:i + h, j:j + w, :]
pts = pts.copy()
mask = (pts[:, 1] >= i) * (pts[:, 0] >= j) * (pts[:, 1] < (i+h)) * (pts[:, 0] < (j+w))
pts[~mask, 2] = -1
scales = np.array([self.size[0]/w, self.size[1]/h])
pts[:, :2] -= np.array([j, i])
pts[:, :2] = (pts[:, :2] * scales)
img = cv2.resize(cropped, self.size)
return img, pts
class RandomResizedLimitCrop(object):
def __init__(self, size, scale=(0.3, 1.0), ratio=(3. / 4., 4. / 3.)):
self.size = (size, size)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
for attempt in range(10):
area = img.shape[0] * img.shape[1]
target_area = np.random.uniform(*scale) * area
aspect_ratio = np.random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.random() < 0.5:
w, h = h, w
if h < img.shape[0] and w < img.shape[1]:
j = np.random.randint(0, img.shape[1] - w)
i = np.random.randint(0, img.shape[0] - h)
return i, j, h, w
# Fallback
w = min(img.shape[0], img.shape[1])
i = (img.shape[0] - w) // 2
j = (img.shape[1] - w) // 2
return i, j, w, w
def __call__(self, image, polygons=None):
i, j, h, w = self.get_params(image, self.scale, self.ratio)
cropped = image[i:i + h, j:j + w, :]
scales = np.array([self.size[0] / w, self.size[1] / h])
if polygons is not None:
for polygon in polygons:
polygon.points[:, 0] = (polygon.points[:, 0] - j) * scales[0]
polygon.points[:, 1] = (polygon.points[:, 1] - i) * scales[1]
img = cv2.resize(cropped, self.size)
return img, polygons
class Normalize(object):
def __init__(self, mean, std):
self.mean = np.array(mean)
self.std = np.array(std)
def __call__(self, image, polygons=None):
image = image.astype(np.float32)
image /= 255.0
image -= self.mean
image /= self.std
return image, polygons
class Resize(object):
def __init__(self, size=256):
self.size = size
def __call__(self, image, polygons=None):
h, w, _ = image.shape
image = cv2.resize(image, (self.size,
self.size))
scales = np.array([self.size / w, self.size / h])
if polygons is not None:
for polygon in polygons:
polygon.points = polygon.points * scales
return image, polygons
class Augmentation(object):
def __init__(self, size, mean, std):
self.size = size
self.mean = mean
self.std = std
self.augmentation = Compose([
# Resize(size),
Padding(),
RandomResizedLimitCrop(size=size, scale=(0.24, 1.0), ratio=(0.33, 3)),
# RandomBrightness(),
# RandomContrast(),
RandomMirror(),
Rotate(),
Normalize(mean, std)
])
def __call__(self, image, polygons=None):
return self.augmentation(image, polygons)
class BaseTransform(object):
def __init__(self, size, mean, std):
self.size = size
self.mean = mean
self.std = std
self.augmentation = Compose([
Resize(size),
Normalize(mean, std)
])
def __call__(self, image, polygons=None):
return self.augmentation(image, polygons)
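# Illustrative usage (added as a sketch; the size, mean/std values and the random image are
# assumptions -- substitute your own dataset statistics). BaseTransform is the deterministic
# eval-time pipeline: resize to a square, then normalise.
if __name__ == "__main__":
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    transform = BaseTransform(size=512, mean=means, std=stds)
    dummy = np.random.randint(0, 255, (600, 800, 3)).astype(np.uint8)
    out_img, _ = transform(dummy)
    print(out_img.shape)  # expected: (512, 512, 3)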
|
py | 1a38f5703a3f2f91aa74f1423e3c02c073a05533 | # Author(s): Sehoon Ha <[email protected]>
# : Seungmoon Song <[email protected]>
import numpy as np
from pydart2.utils.misc import S
import pydart2.utils.transformations as trans
from itertools import tee
class AttachmentPoint(object):
"""
"""
def __init__(self, bodyname, offset):
self.bodyname = bodyname
self.skeleton = None
self.body = None
self.offset = np.array(offset)
def is_initialized(self, ):
return (self.body is not None)
def initialize(self, skeleton):
self.skeleton = skeleton
self.body = skeleton.body(self.bodyname)
def to_world(self, ):
return self.body.to_world(self.offset)
def __str__(self, ):
return "(%s, %s)" % (self.bodyname, S(self.offset, 3))
class Route(object):
"""
route = Route([("Upper", [0.0, 0.2, 0.0]), ("Lower", [0.0, 0.2, 0.0])])
"""
def __init__(self, points=None):
if points is None:
self.points = []
else:
self.points = [AttachmentPoint(name, offset)
for name, offset in points]
self.world_points = None
self.world_directions = None
self.length = None
def num_points(self, ):
return len(self.points)
def __len__(self, ):
return self.num_points()
def add_point(self, bodyname, offset):
pt = AttachmentPoint(bodyname=bodyname,
offset=offset)
self.points.append(pt)
def initialize_points(self, skeleton):
for pt in self.points:
pt.initialize(skeleton)
self.local_points = [pt.offset for pt in self.points]
self.bodynodes = [pt.body for pt in self.points]
self.update_geometry_variables()
def update_geometry_variables(self, ):
self.world_points = [pt.to_world() for pt in self.points]
self.length = 0.0
self.world_directions = list()
pt0 = self.world_points[0]
for pt1 in self.world_points[1:]:
diff = pt1 - pt0
length = np.linalg.norm(diff)
direction = diff / length
self.length += length
self.world_directions.append(direction)
pt0 = pt1
def local_points_as_pair(self, ):
"s -> (offset0, offset1), (offset1, offset2), ..."
a, b = tee(self.local_points)
next(b, None)
return zip(a, b)
def bodynodes_as_pair(self, ):
"s -> (body0, body1), (body1, body2), ..."
a, b = tee(self.bodynodes)
next(b, None)
return zip(a, b)
def render_with_ri(self, ri, ):
if self.num_points() < 2:
return
ri.set_line_width(3)
world_points = [pt.to_world() for pt in self.points]
ri.render_lines(world_points)
def __repr__(self, ):
tokens = [str(pt) for pt in self.points]
return "[%s: length = %.4f]" % (", ".join(tokens), self.length)
|
py | 1a38f6189deca18ac4b3561a5db5a5e1565fd1cd | description = 'Various devices for logical motors in AMOR'
includes = ['sinq_amor_movable']
devices = dict(
controller = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotorHandler',
description = 'Logical Motors Controller',
lowlevel = True,
loglevel = 'debug'
),
m2t = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotor',
description = 'Logical motor monochromator two theta',
motortype = 'm2t',
controller = 'controller',
),
s2t = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotor',
description = 'Logical motor sample two theta',
motortype = 's2t',
controller = 'controller',
),
ath = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotor',
description = 'Logical Motor analyser theta',
motortype = 'ath',
controller = 'controller',
loglevel = 'debug'
),
dimetix = device('nicos_sinq.amor.devices.dimetix.EpicsDimetix',
description = 'Laser distance measurement device',
readpv = 'SQ:AMOR:DIMETIX:DIST',
epicstimeout = 3.0,
),
laser_switch = device('nicos_sinq.amor.devices.sps_switch.SpsSwitch',
description = 'Laser light controlled by SPS',
epicstimeout = 3.0,
readpv = 'SQ:AMOR:SPS1:DigitalInput',
commandpv = 'SQ:AMOR:SPS1:Push',
commandstr = "S0001",
bytelist = [(15, 7)],
mapping = {'OFF': 0, 'ON': 1}
),
xlz = device('nicos_ess.devices.epics.motor.EpicsMotor',
description = 'Counter z position distance laser motor',
epicstimeout = 3.0,
motorpv = 'SQ:AMOR:mota:xlz',
errormsgpv = 'SQ:AMOR:mota:xlz-MsgTxt',
lowlevel = True
),
laser_positioner = device('nicos.devices.generic.Switcher',
description = 'Position laser to read components',
moveable = 'xlz',
mapping = {
'park': -0.1,
'analyser': -24.0,
'detector': 0.0,
'polariser': -88.0,
'sample': -52.0,
'slit2': -73.0,
'slit3': -63.0,
'slit4': -34.0,
'selene': -116.0,
},
fallback = '<undefined>',
precision = 0
),
Distances = device('nicos_sinq.amor.devices.component_handler.DistancesHandler',
description = 'Device to handle distance calculation in AMOR',
components = {
'polariser': (-232, 0),
'slit2': (302, 0),
'slit3': (-22, 0),
'slit4': (306, 0),
'sample': (-310, 0),
'detector': (326, 0),
'analyser': (310, 0),
'filter': (-726, 0),
'slit1': (0, 0)
},
fixedcomponents = {
'chopper': 9906,
},
switch = 'laser_switch',
positioner = 'laser_positioner',
dimetix = 'dimetix'
),
com = device('test.nicos_ess.test_devices.test_epics_motor.FakeEpicsMotor',
epicstimeout = 3.0,
description = 'Counter tilt motor',
motorpv = 'com',
),
coz = device('test.nicos_ess.test_devices.test_epics_motor.FakeEpicsMotor',
epicstimeout = 3.0,
description = 'Counter z translation motor',
motorpv = 'coz',
),
nu = device('nicos_sinq.amor.devices.logical_motor.DetectorAngleMotor',
description = 'Sample omega',
com = 'com',
coz = 'coz',
unit = 'deg',
coz_scale_factor = 10.,
),
)
|
py | 1a38f6b8bdb1778586e81302fe839f1fcf630f16 | import torch
from torch import nn, Tensor
from typing import Optional, Tuple, Union
__all__ = ['ConvLSTM2dCell', 'ConvLSTM2d']
class ConvLSTM2dCell(nn.Module):
"""
Analogous to LSTM cell, but replaces the linear transformation in the gates' definition with a convolutional layer.
For simplicity and efficiency reason, assumes that hidden state's spatial dimension is the same as that of input;
'same' padding will be enforced.
References:
Xingjian Shi et al., "Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting."
https://arxiv.org/abs/1506.04214
"""
def __init__(self, in_channels: int, hidden_channels: int, kernel_size: Union[int, Tuple[int, int]], **kwargs):
super().__init__()
self.hidden_channels = hidden_channels
self.gates = nn.Conv2d(in_channels + hidden_channels, hidden_channels * 4, kernel_size, padding='same', **kwargs)
def forward(self, inputs: Tensor, hc: Optional[Tuple[Tensor, Tensor]]=None) -> Tuple[Tensor, Tensor]:
if hc is None:
hidden_dimensions = inputs.shape[0], self.hidden_channels, inputs.shape[2], inputs.shape[3]
            # allocate the initial state on the same device/dtype as the input
            hidden = torch.zeros(*hidden_dimensions, device=inputs.device, dtype=inputs.dtype)
            cell = torch.zeros(*hidden_dimensions, device=inputs.device, dtype=inputs.dtype)
else:
hidden, cell = hc
gate_inp = torch.cat([inputs, hidden], dim=1)
inp_gate, forget_gate, inter_cell, out_gate = self.gates(gate_inp).chunk(4, 1)
cell_ret = torch.sigmoid(forget_gate) * cell + torch.sigmoid(inp_gate) * torch.tanh(inter_cell)
hidden_ret = torch.sigmoid(out_gate) * torch.tanh(cell_ret)
return (hidden_ret, cell_ret)
class ConvLSTM2d(nn.Module):
# TODO: support batch_first == True, num_layer and bidirectional
def __init__(self, in_channels: int, hidden_channels: int, kernel_size: Union[int, Tuple[int, int]], **kwargs):
"""
Analogous to LSTM, but replaces the linear transformation in the gates' definition with a convolutional layer.
For simplicity and efficiency reason, assumes that hidden state's spatial dimension is the same as that of input;
'same' padding will be enforced.
Only supports 1 layer, single direction LSTM for now.
References:
Xingjian Shi et al., "Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting."
https://arxiv.org/abs/1506.04214
"""
super().__init__()
self.cell = ConvLSTM2dCell(in_channels, hidden_channels, kernel_size, **kwargs)
def forward(self, inputs: Tensor) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
outputs = []
hc = None
for t in range(inputs.shape[0]):
            input = inputs[t]  # use the t-th timestep of the sequence
hc = self.cell(input, hc)
outputs.append(hc[0])
output = torch.stack(outputs, dim=0)
return output, hc
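# Illustrative usage (added as a sketch; the tensor sizes are assumptions). The module is
# sequence-first: input is (time, batch, channels, height, width), the output stacks the
# hidden state at every step, and hc is the final (hidden, cell) pair.
if __name__ == "__main__":
    seq = torch.randn(5, 2, 3, 16, 16)
    layer = ConvLSTM2d(in_channels=3, hidden_channels=8, kernel_size=3)
    out, (h, c) = layer(seq)
    print(out.shape, h.shape, c.shape)  # (5, 2, 8, 16, 16) (2, 8, 16, 16) (2, 8, 16, 16)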
|
py | 1a38f6c721130249eb62c02bcb63edf9ebfeae8a | from urllib import request, error, parse
#url = ('https://www.google.com/search?q=박보영')
#parse.urlencode({'q':'박보영'})
header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36"}
def download(url, params={}, retries=3):
resp = None
try:
req = request.Request(url + "?" + parse.urlencode(params), headers = header)
#req.add_header("user-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36")
resp = request.urlopen(req)
except error.HTTPError as e:
if 500 <= e.code < 600 and retries > 0:
            resp = download(url, params, retries=retries - 1)  # count down so the retries actually run out
else:
print(e.code)
print(e.reason)
print(e.geturl())
print(e.headers)
return resp
params = {"q":"박보영"}
parse.urlencode(params)
resp = download("https://www.google.com/search", params)
#resp.read()
print(resp.read().decode('utf-8'))
|
py | 1a38f7dddaddb86abead00b138a93597ec550ca3 |
import os
import subprocess
import platform
from SetupPython import PythonConfiguration as PythonRequirements
# Make sure everything we need for the setup is installed
PythonRequirements.Validate()
from SetupPremake import PremakeConfiguration as PremakeRequirements
# from SetupVulkan import VulkanConfiguration as VulkanRequirements
os.chdir('./../') # Change from devtools/scripts directory to root
premakeInstalled = PremakeRequirements.Validate()
# VulkanRequirements.Validate()
print("\nUpdating submodules...")
subprocess.call(["git", "submodule", "update", "--init", "--recursive"])
if (premakeInstalled):
if platform.system() == "Windows":
print("\nRunning premake...")
subprocess.call([os.path.abspath("./scripts/Win-GenProjects.bat"), "nopause"])
print("\nSetup completed!")
else:
print("Hazel requires Premake to generate project files.") |
py | 1a38f8d6bd92f83b4ab203be73c59b36546fc8b1 | class OrderType:
REDIRECT = 'redirect'
DIRECT = 'direct'
CHECKOUT = 'checkout'
PAYMENTLINK = 'paymentlink'
|
py | 1a38f9303ca22a59e5d1bffbe13b74cfee161580 | n1 = int(input('Digite um número inteiro: '))
print('''Escolha uma das bases para conversão:
[ 1 ] converter para BINÁRIO
[ 2 ] converter para OCTAL
[ 3 ] converter para HEXADECIMAL''')
opção = int(input('Escolha uma opção: '))
if opção == 1:
print('{} convertido para BINÁRIO é igual a {}'.format(n1, bin(n1)[2:]))
elif opção == 2:
print('{} convertido para OCTAL é igual a {}'.format(n1, oct(n1)[2:]))
elif opção == 3:
print('{} converito para HEXADECIMAL é igual a {}'.format(n1, hex(n1)[2:]))
else:
print('Opção inválida! Tente novamente.') |
py | 1a38f94b8833cfd525f91bc90139656c2f22b31f | #!/usr/bin/env python
# -*- Mode: Python; tab-width: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# vim:set ft=python ts=4 sw=4 sts=4 autoindent:
"""Wrapper for safely importing Messager with a fallback that will get
_something_ to the user even if Messager itself breaks."""
try:
from realmessage import Messager
except BaseException:
from sosmessage import SosMessager as Messager
|
py | 1a38f9cb714c1bf81a8875aad522538d43150b29 | from .star import star, star_options
class animator:
"""
Animates the steps to generate the rotating polygons generated
by rotating inners circle inside a larger circle and forming a star.
"""
def __init__(self, star: star, options: star_options):
self.star = star
self.options = options
def reset(self):
"""
Reset the animator entirely.
"""
pass
def generate_outer_circle(self):
"""
Draw the outer circle inside which the star will be made.
"""
pass
def generate_inner_circle(self):
"""
Draw the inner circle with a radius a fraction of the outer circle.
That fraction is given as the ratio.
"""
pass
def generate_inner_circle_dot(self):
"""
Draw the dot on the inner circle at the radius ratio given.
The ratio should be between 0 and 1.
"""
pass
def generate_star(self):
"""
Draw the star by rotating the inner circle leaving a trail
formed by the dot on the inner circle, forming the star.
"""
pass
def generate_other_inner_circle_dots(self):
"""
Draw the other dots on the inner circle that are added
when the circle passes over the star's spikes.
"""
pass
def generate_inner_circle_polygon(self):
"""
Draw the polygon generated by the inner circle dots.
"""
pass
def generate_other_inner_circles(self):
"""
Draw the additional inner circles and their dots and polygon.
"""
pass
def generate_inter_circle_polygons(self):
"""
Draw the polygon generated by the corresponding dots
in all inner circles.
"""
pass
def animate_all(self):
"""
Animate all the inner circles and their polygons.
"""
pass
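# Illustrative sketch (added for clarity, not part of the original module): the
# docstrings above describe tracing a dot on an inner circle that rolls inside a
# larger circle, i.e. a hypotrochoid. Assuming an outer radius, an inner radius
# given by `ratio`, and a dot placed at `dot_ratio` of the inner radius, one
# point of the star's trail at angle t could be computed as follows.
import math


def _hypotrochoid_point(t: float, outer_radius: float, ratio: float, dot_ratio: float):
    """Return one (x, y) point of the dot's trail at angle t (in radians)."""
    r = outer_radius * ratio      # inner circle radius
    d = dot_ratio * r             # distance of the dot from the inner circle's centre
    x = (outer_radius - r) * math.cos(t) + d * math.cos((outer_radius - r) / r * t)
    y = (outer_radius - r) * math.sin(t) - d * math.sin((outer_radius - r) / r * t)
    return x, y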
|
py | 1a38fbf030a73c491be1d85e745d0cdf834eb2a4 | import cv2
from Recognizer import *
# for more details on how to use this code see : https://github.com/Ahmedjellouli/FaceRecognition
Recognizer = Recognizer(Database="Database",
Tolerance=0.55,
detectFrontalFace=False,
detectLandmarks=True)
Image = Image(Recognizer=Recognizer,
filename="Faces\\Malala-Yousafzai.jpg",
Save=True)
Video = Video(Recognizer=Recognizer,
              filename="Videos\\elon.mp4",  # put your video path here
              )
Image.RecognizeFaces()  # detect faces in the image
Video.RecognizeFaces()  # detect faces in the video
Video.AddAudio()
|
py | 1a38fc1140fb4053a4ad68a1eefa6e0dcaac2c9e | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Miroslav Bauer, CESNET.
#
# oarepo-references is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Test OARepo references fields."""
import uuid
import pytest
from tests.test_utils import TestSchema
from oarepo_references.mixins import ReferenceFieldMixin
@pytest.mark.usefixtures("db")
class TestOArepoReferencesFields:
"""OARepo references fields test."""
def test_reference_field(self, test_record_data, referenced_records):
"""Test marshmallow schema ReferenceField methods."""
schema = TestSchema()
rf = schema.fields['ref']
assert isinstance(rf, ReferenceFieldMixin)
rec_uuid = referenced_records[0].id
rf.register(test_record_data['taxo1']['links']['self'], rec_uuid, True)
assert len(rf.context['references']) == 1
ref = rf.context['references'][0]
assert ref['reference'] == \
test_record_data['taxo1']['links']['self']
assert ref['reference_uuid'] == rec_uuid
def test_marshmallow_load(self, test_record_data):
"""Test marshmallow schema load."""
schema = TestSchema()
res = schema.load(test_record_data, partial=True)
assert res == test_record_data
|
py | 1a38fca53bd1db0cc540d5b0406a1d3fe667f876 | from random import randrange
from sympy.simplify.hyperexpand import (ShiftA, ShiftB, UnShiftA, UnShiftB,
MeijerShiftA, MeijerShiftB, MeijerShiftC, MeijerShiftD,
MeijerUnShiftA, MeijerUnShiftB, MeijerUnShiftC,
MeijerUnShiftD,
ReduceOrder, reduce_order, apply_operators,
devise_plan, make_derivative_operator, Formula,
hyperexpand, Hyper_Function, G_Function,
reduce_order_meijer,
build_hypergeometric_formula)
from sympy import (hyper, I, S, meijerg, Piecewise, Tuple, Sum, binomial,
Expr, symbols)
from sympy.abc import z, a, b, c
from sympy.testing.pytest import XFAIL, raises, slow, ON_TRAVIS, skip
from sympy.testing.randtest import verify_numerically as tn
from sympy import (cos, sin, log, exp, asin, lowergamma, atanh, besseli,
gamma, sqrt, pi, erf, exp_polar, Rational)
def test_branch_bug():
assert hyperexpand(hyper((Rational(-1, 3), S.Half), (Rational(2, 3), Rational(3, 2)), -z)) == \
-z**S('1/3')*lowergamma(exp_polar(I*pi)/3, z)/5 \
+ sqrt(pi)*erf(sqrt(z))/(5*sqrt(z))
assert hyperexpand(meijerg([Rational(7, 6), 1], [], [Rational(2, 3)], [Rational(1, 6), 0], z)) == \
2*z**S('2/3')*(2*sqrt(pi)*erf(sqrt(z))/sqrt(z) - 2*lowergamma(
Rational(2, 3), z)/z**S('2/3'))*gamma(Rational(2, 3))/gamma(Rational(5, 3))
def test_hyperexpand():
# Luke, Y. L. (1969), The Special Functions and Their Approximations,
# Volume 1, section 6.2
assert hyperexpand(hyper([], [], z)) == exp(z)
assert hyperexpand(hyper([1, 1], [2], -z)*z) == log(1 + z)
assert hyperexpand(hyper([], [S.Half], -z**2/4)) == cos(z)
assert hyperexpand(z*hyper([], [S('3/2')], -z**2/4)) == sin(z)
assert hyperexpand(hyper([S('1/2'), S('1/2')], [S('3/2')], z**2)*z) \
== asin(z)
assert isinstance(Sum(binomial(2, z)*z**2, (z, 0, a)).doit(), Expr)
def can_do(ap, bq, numerical=True, div=1, lowerplane=False):
r = hyperexpand(hyper(ap, bq, z))
if r.has(hyper):
return False
if not numerical:
return True
repl = {}
randsyms = r.free_symbols - {z}
while randsyms:
# Only randomly generated parameters are checked.
for n, ai in enumerate(randsyms):
repl[ai] = randcplx(n)/div
if not any(b.is_Integer and b <= 0 for b in Tuple(*bq).subs(repl)):
break
[a, b, c, d] = [2, -1, 3, 1]
if lowerplane:
[a, b, c, d] = [2, -2, 3, -1]
return tn(
hyper(ap, bq, z).subs(repl),
r.replace(exp_polar, exp).subs(repl),
z, a=a, b=b, c=c, d=d)
def test_roach():
# Kelly B. Roach. Meijer G Function Representations.
# Section "Gallery"
assert can_do([S.Half], [Rational(9, 2)])
assert can_do([], [1, Rational(5, 2), 4])
assert can_do([Rational(-1, 2), 1, 2], [3, 4])
assert can_do([Rational(1, 3)], [Rational(-2, 3), Rational(-1, 2), S.Half, 1])
assert can_do([Rational(-3, 2), Rational(-1, 2)], [Rational(-5, 2), 1])
assert can_do([Rational(-3, 2), ], [Rational(-1, 2), S.Half]) # shine-integral
assert can_do([Rational(-3, 2), Rational(-1, 2)], [2]) # elliptic integrals
@XFAIL
def test_roach_fail():
assert can_do([Rational(-1, 2), 1], [Rational(1, 4), S.Half, Rational(3, 4)]) # PFDD
assert can_do([Rational(3, 2)], [Rational(5, 2), 5]) # struve function
assert can_do([Rational(-1, 2), S.Half, 1], [Rational(3, 2), Rational(5, 2)]) # polylog, pfdd
assert can_do([1, 2, 3], [S.Half, 4]) # XXX ?
assert can_do([S.Half], [Rational(-1, 3), Rational(-1, 2), Rational(-2, 3)]) # PFDD ?
# For the long table tests, see end of file
def test_polynomial():
from sympy import oo
assert hyperexpand(hyper([], [-1], z)) is oo
assert hyperexpand(hyper([-2], [-1], z)) is oo
assert hyperexpand(hyper([0, 0], [-1], z)) == 1
assert can_do([-5, -2, randcplx(), randcplx()], [-10, randcplx()])
assert hyperexpand(hyper((-1, 1), (-2,), z)) == 1 + z/2
def test_hyperexpand_bases():
assert hyperexpand(hyper([2], [a], z)) == \
a + z**(-a + 1)*(-a**2 + 3*a + z*(a - 1) - 2)*exp(z)* \
lowergamma(a - 1, z) - 1
    # TODO [a+1, a - S.Half], [2*a]
assert hyperexpand(hyper([1, 2], [3], z)) == -2/z - 2*log(-z + 1)/z**2
assert hyperexpand(hyper([S.Half, 2], [Rational(3, 2)], z)) == \
-1/(2*z - 2) + atanh(sqrt(z))/sqrt(z)/2
assert hyperexpand(hyper([S.Half, S.Half], [Rational(5, 2)], z)) == \
(-3*z + 3)/4/(z*sqrt(-z + 1)) \
+ (6*z - 3)*asin(sqrt(z))/(4*z**Rational(3, 2))
assert hyperexpand(hyper([1, 2], [Rational(3, 2)], z)) == -1/(2*z - 2) \
- asin(sqrt(z))/(sqrt(z)*(2*z - 2)*sqrt(-z + 1))
assert hyperexpand(hyper([Rational(-1, 2) - 1, 1, 2], [S.Half, 3], z)) == \
sqrt(z)*(z*Rational(6, 7) - Rational(6, 5))*atanh(sqrt(z)) \
+ (-30*z**2 + 32*z - 6)/35/z - 6*log(-z + 1)/(35*z**2)
assert hyperexpand(hyper([1 + S.Half, 1, 1], [2, 2], z)) == \
-4*log(sqrt(-z + 1)/2 + S.Half)/z
# TODO hyperexpand(hyper([a], [2*a + 1], z))
# TODO [S.Half, a], [Rational(3, 2), a+1]
assert hyperexpand(hyper([2], [b, 1], z)) == \
z**(-b/2 + S.Half)*besseli(b - 1, 2*sqrt(z))*gamma(b) \
+ z**(-b/2 + 1)*besseli(b, 2*sqrt(z))*gamma(b)
# TODO [a], [a - S.Half, 2*a]
def test_hyperexpand_parametric():
assert hyperexpand(hyper([a, S.Half + a], [S.Half], z)) \
== (1 + sqrt(z))**(-2*a)/2 + (1 - sqrt(z))**(-2*a)/2
assert hyperexpand(hyper([a, Rational(-1, 2) + a], [2*a], z)) \
== 2**(2*a - 1)*((-z + 1)**S.Half + 1)**(-2*a + 1)
def test_shifted_sum():
from sympy import simplify
assert simplify(hyperexpand(z**4*hyper([2], [3, S('3/2')], -z**2))) \
== z*sin(2*z) + (-z**2 + S.Half)*cos(2*z) - S.Half
def _randrat():
""" Steer clear of integers. """
return S(randrange(25) + 10)/50
def randcplx(offset=-1):
""" Polys is not good with real coefficients. """
return _randrat() + I*_randrat() + I*(1 + offset)
@slow
def test_formulae():
from sympy.simplify.hyperexpand import FormulaCollection
formulae = FormulaCollection().formulae
for formula in formulae:
h = formula.func(formula.z)
rep = {}
for n, sym in enumerate(formula.symbols):
rep[sym] = randcplx(n)
# NOTE hyperexpand returns truly branched functions. We know we are
# on the main sheet, but numerical evaluation can still go wrong
# (e.g. if exp_polar cannot be evalf'd).
# Just replace all exp_polar by exp, this usually works.
# first test if the closed-form is actually correct
h = h.subs(rep)
closed_form = formula.closed_form.subs(rep).rewrite('nonrepsmall')
z = formula.z
assert tn(h, closed_form.replace(exp_polar, exp), z)
# now test the computed matrix
cl = (formula.C * formula.B)[0].subs(rep).rewrite('nonrepsmall')
assert tn(closed_form.replace(
exp_polar, exp), cl.replace(exp_polar, exp), z)
deriv1 = z*formula.B.applyfunc(lambda t: t.rewrite(
'nonrepsmall')).diff(z)
deriv2 = formula.M * formula.B
for d1, d2 in zip(deriv1, deriv2):
assert tn(d1.subs(rep).replace(exp_polar, exp),
d2.subs(rep).rewrite('nonrepsmall').replace(exp_polar, exp), z)
def test_meijerg_formulae():
from sympy.simplify.hyperexpand import MeijerFormulaCollection
formulae = MeijerFormulaCollection().formulae
for sig in formulae:
for formula in formulae[sig]:
g = meijerg(formula.func.an, formula.func.ap,
formula.func.bm, formula.func.bq,
formula.z)
rep = {}
for sym in formula.symbols:
rep[sym] = randcplx()
# first test if the closed-form is actually correct
g = g.subs(rep)
closed_form = formula.closed_form.subs(rep)
z = formula.z
assert tn(g, closed_form, z)
# now test the computed matrix
cl = (formula.C * formula.B)[0].subs(rep)
assert tn(closed_form, cl, z)
deriv1 = z*formula.B.diff(z)
deriv2 = formula.M * formula.B
for d1, d2 in zip(deriv1, deriv2):
assert tn(d1.subs(rep), d2.subs(rep), z)
def op(f):
return z*f.diff(z)
def test_plan():
assert devise_plan(Hyper_Function([0], ()),
Hyper_Function([0], ()), z) == []
with raises(ValueError):
devise_plan(Hyper_Function([1], ()), Hyper_Function((), ()), z)
with raises(ValueError):
devise_plan(Hyper_Function([2], [1]), Hyper_Function([2], [2]), z)
with raises(ValueError):
devise_plan(Hyper_Function([2], []), Hyper_Function([S("1/2")], []), z)
# We cannot use pi/(10000 + n) because polys is insanely slow.
a1, a2, b1 = (randcplx(n) for n in range(3))
b1 += 2*I
h = hyper([a1, a2], [b1], z)
h2 = hyper((a1 + 1, a2), [b1], z)
assert tn(apply_operators(h,
devise_plan(Hyper_Function((a1 + 1, a2), [b1]),
Hyper_Function((a1, a2), [b1]), z), op),
h2, z)
h2 = hyper((a1 + 1, a2 - 1), [b1], z)
assert tn(apply_operators(h,
devise_plan(Hyper_Function((a1 + 1, a2 - 1), [b1]),
Hyper_Function((a1, a2), [b1]), z), op),
h2, z)
def test_plan_derivatives():
a1, a2, a3 = 1, 2, S('1/2')
b1, b2 = 3, S('5/2')
h = Hyper_Function((a1, a2, a3), (b1, b2))
h2 = Hyper_Function((a1 + 1, a2 + 1, a3 + 2), (b1 + 1, b2 + 1))
ops = devise_plan(h2, h, z)
f = Formula(h, z, h(z), [])
deriv = make_derivative_operator(f.M, z)
assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)
h2 = Hyper_Function((a1, a2 - 1, a3 - 2), (b1 - 1, b2 - 1))
ops = devise_plan(h2, h, z)
assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)
def test_reduction_operators():
a1, a2, b1 = (randcplx(n) for n in range(3))
h = hyper([a1], [b1], z)
assert ReduceOrder(2, 0) is None
assert ReduceOrder(2, -1) is None
assert ReduceOrder(1, S('1/2')) is None
h2 = hyper((a1, a2), (b1, a2), z)
assert tn(ReduceOrder(a2, a2).apply(h, op), h2, z)
h2 = hyper((a1, a2 + 1), (b1, a2), z)
assert tn(ReduceOrder(a2 + 1, a2).apply(h, op), h2, z)
h2 = hyper((a2 + 4, a1), (b1, a2), z)
assert tn(ReduceOrder(a2 + 4, a2).apply(h, op), h2, z)
# test several step order reduction
ap = (a2 + 4, a1, b1 + 1)
bq = (a2, b1, b1)
func, ops = reduce_order(Hyper_Function(ap, bq))
assert func.ap == (a1,)
assert func.bq == (b1,)
assert tn(apply_operators(h, ops, op), hyper(ap, bq, z), z)
def test_shift_operators():
a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
h = hyper((a1, a2), (b1, b2, b3), z)
raises(ValueError, lambda: ShiftA(0))
raises(ValueError, lambda: ShiftB(1))
assert tn(ShiftA(a1).apply(h, op), hyper((a1 + 1, a2), (b1, b2, b3), z), z)
assert tn(ShiftA(a2).apply(h, op), hyper((a1, a2 + 1), (b1, b2, b3), z), z)
assert tn(ShiftB(b1).apply(h, op), hyper((a1, a2), (b1 - 1, b2, b3), z), z)
assert tn(ShiftB(b2).apply(h, op), hyper((a1, a2), (b1, b2 - 1, b3), z), z)
assert tn(ShiftB(b3).apply(h, op), hyper((a1, a2), (b1, b2, b3 - 1), z), z)
def test_ushift_operators():
a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
h = hyper((a1, a2), (b1, b2, b3), z)
raises(ValueError, lambda: UnShiftA((1,), (), 0, z))
raises(ValueError, lambda: UnShiftB((), (-1,), 0, z))
raises(ValueError, lambda: UnShiftA((1,), (0, -1, 1), 0, z))
raises(ValueError, lambda: UnShiftB((0, 1), (1,), 0, z))
s = UnShiftA((a1, a2), (b1, b2, b3), 0, z)
assert tn(s.apply(h, op), hyper((a1 - 1, a2), (b1, b2, b3), z), z)
s = UnShiftA((a1, a2), (b1, b2, b3), 1, z)
assert tn(s.apply(h, op), hyper((a1, a2 - 1), (b1, b2, b3), z), z)
s = UnShiftB((a1, a2), (b1, b2, b3), 0, z)
assert tn(s.apply(h, op), hyper((a1, a2), (b1 + 1, b2, b3), z), z)
s = UnShiftB((a1, a2), (b1, b2, b3), 1, z)
assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2 + 1, b3), z), z)
s = UnShiftB((a1, a2), (b1, b2, b3), 2, z)
assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2, b3 + 1), z), z)
def can_do_meijer(a1, a2, b1, b2, numeric=True):
"""
This helper function tries to hyperexpand() the meijer g-function
corresponding to the parameters a1, a2, b1, b2.
It returns False if this expansion still contains g-functions.
If numeric is True, it also tests the so-obtained formula numerically
(at random values) and returns False if the test fails.
Else it returns True.
"""
from sympy import unpolarify, expand
r = hyperexpand(meijerg(a1, a2, b1, b2, z))
if r.has(meijerg):
return False
# NOTE hyperexpand() returns a truly branched function, whereas numerical
# evaluation only works on the main branch. Since we are evaluating on
# the main branch, this should not be a problem, but expressions like
# exp_polar(I*pi/2*x)**a are evaluated incorrectly. We thus have to get
# rid of them. The expand heuristically does this...
r = unpolarify(expand(r, force=True, power_base=True, power_exp=False,
mul=False, log=False, multinomial=False, basic=False))
if not numeric:
return True
repl = {}
for n, ai in enumerate(meijerg(a1, a2, b1, b2, z).free_symbols - {z}):
repl[ai] = randcplx(n)
return tn(meijerg(a1, a2, b1, b2, z).subs(repl), r.subs(repl), z)
@slow
def test_meijerg_expand():
from sympy import gammasimp, simplify
# from mpmath docs
assert hyperexpand(meijerg([[], []], [[0], []], -z)) == exp(z)
assert hyperexpand(meijerg([[1, 1], []], [[1], [0]], z)) == \
log(z + 1)
assert hyperexpand(meijerg([[1, 1], []], [[1], [1]], z)) == \
z/(z + 1)
assert hyperexpand(meijerg([[], []], [[S.Half], [0]], (z/2)**2)) \
== sin(z)/sqrt(pi)
assert hyperexpand(meijerg([[], []], [[0], [S.Half]], (z/2)**2)) \
== cos(z)/sqrt(pi)
assert can_do_meijer([], [a], [a - 1, a - S.Half], [])
assert can_do_meijer([], [], [a/2], [-a/2], False) # branches...
assert can_do_meijer([a], [b], [a], [b, a - 1])
# wikipedia
assert hyperexpand(meijerg([1], [], [], [0], z)) == \
Piecewise((0, abs(z) < 1), (1, abs(1/z) < 1),
(meijerg([1], [], [], [0], z), True))
assert hyperexpand(meijerg([], [1], [0], [], z)) == \
Piecewise((1, abs(z) < 1), (0, abs(1/z) < 1),
(meijerg([], [1], [0], [], z), True))
# The Special Functions and their Approximations
assert can_do_meijer([], [], [a + b/2], [a, a - b/2, a + S.Half])
assert can_do_meijer(
[], [], [a], [b], False) # branches only agree for small z
assert can_do_meijer([], [S.Half], [a], [-a])
assert can_do_meijer([], [], [a, b], [])
assert can_do_meijer([], [], [a, b], [])
assert can_do_meijer([], [], [a, a + S.Half], [b, b + S.Half])
    assert can_do_meijer([], [], [a, -a], [0, S.Half], False)  # ditto
assert can_do_meijer([], [], [a, a + S.Half, b, b + S.Half], [])
assert can_do_meijer([S.Half], [], [0], [a, -a])
    assert can_do_meijer([S.Half], [], [a], [0, -a], False)  # ditto
assert can_do_meijer([], [a - S.Half], [a, b], [a - S.Half], False)
assert can_do_meijer([], [a + S.Half], [a + b, a - b, a], [], False)
assert can_do_meijer([a + S.Half], [], [b, 2*a - b, a], [], False)
# This for example is actually zero.
assert can_do_meijer([], [], [], [a, b])
# Testing a bug:
assert hyperexpand(meijerg([0, 2], [], [], [-1, 1], z)) == \
Piecewise((0, abs(z) < 1),
(z*(1 - 1/z**2)/2, abs(1/z) < 1),
(meijerg([0, 2], [], [], [-1, 1], z), True))
# Test that the simplest possible answer is returned:
assert gammasimp(simplify(hyperexpand(
meijerg([1], [1 - a], [-a/2, -a/2 + S.Half], [], 1/z)))) == \
-2*sqrt(pi)*(sqrt(z + 1) + 1)**a/a
# Test that hyper is returned
assert hyperexpand(meijerg([1], [], [a], [0, 0], z)) == hyper(
(a,), (a + 1, a + 1), z*exp_polar(I*pi))*z**a*gamma(a)/gamma(a + 1)**2
# Test place option
f = meijerg(((0, 1), ()), ((S.Half,), (0,)), z**2)
assert hyperexpand(f) == sqrt(pi)/sqrt(1 + z**(-2))
assert hyperexpand(f, place=0) == sqrt(pi)*z/sqrt(z**2 + 1)
def test_meijerg_lookup():
from sympy import uppergamma, Si, Ci
assert hyperexpand(meijerg([a], [], [b, a], [], z)) == \
z**b*exp(z)*gamma(-a + b + 1)*uppergamma(a - b, z)
assert hyperexpand(meijerg([0], [], [0, 0], [], z)) == \
exp(z)*uppergamma(0, z)
assert can_do_meijer([a], [], [b, a + 1], [])
assert can_do_meijer([a], [], [b + 2, a], [])
assert can_do_meijer([a], [], [b - 2, a], [])
assert hyperexpand(meijerg([a], [], [a, a, a - S.Half], [], z)) == \
-sqrt(pi)*z**(a - S.Half)*(2*cos(2*sqrt(z))*(Si(2*sqrt(z)) - pi/2)
- 2*sin(2*sqrt(z))*Ci(2*sqrt(z))) == \
hyperexpand(meijerg([a], [], [a, a - S.Half, a], [], z)) == \
hyperexpand(meijerg([a], [], [a - S.Half, a, a], [], z))
assert can_do_meijer([a - 1], [], [a + 2, a - Rational(3, 2), a + 1], [])
@XFAIL
def test_meijerg_expand_fail():
# These basically test hyper([], [1/2 - a, 1/2 + 1, 1/2], z),
# which is *very* messy. But since the meijer g actually yields a
# sum of bessel functions, things can sometimes be simplified a lot and
# are then put into tables...
assert can_do_meijer([], [], [a + S.Half], [a, a - b/2, a + b/2])
assert can_do_meijer([], [], [0, S.Half], [a, -a])
assert can_do_meijer([], [], [3*a - S.Half, a, -a - S.Half], [a - S.Half])
assert can_do_meijer([], [], [0, a - S.Half, -a - S.Half], [S.Half])
assert can_do_meijer([], [], [a, b + S.Half, b], [2*b - a])
assert can_do_meijer([], [], [a, b + S.Half, b, 2*b - a])
assert can_do_meijer([S.Half], [], [-a, a], [0])
@slow
def test_meijerg():
# carefully set up the parameters.
# NOTE: this used to fail sometimes. I believe it is fixed, but if you
# hit an inexplicable test failure here, please let me know the seed.
a1, a2 = (randcplx(n) - 5*I - n*I for n in range(2))
b1, b2 = (randcplx(n) + 5*I + n*I for n in range(2))
b3, b4, b5, a3, a4, a5 = (randcplx() for n in range(6))
g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)
assert ReduceOrder.meijer_minus(3, 4) is None
assert ReduceOrder.meijer_plus(4, 3) is None
g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2], z)
assert tn(ReduceOrder.meijer_plus(a2, a2).apply(g, op), g2, z)
g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2 + 1], z)
assert tn(ReduceOrder.meijer_plus(a2, a2 + 1).apply(g, op), g2, z)
g2 = meijerg([a1, a2 - 1], [a3, a4], [b1], [b3, b4, a2 + 2], z)
assert tn(ReduceOrder.meijer_plus(a2 - 1, a2 + 2).apply(g, op), g2, z)
g2 = meijerg([a1], [a3, a4, b2 - 1], [b1, b2 + 2], [b3, b4], z)
assert tn(ReduceOrder.meijer_minus(
b2 + 2, b2 - 1).apply(g, op), g2, z, tol=1e-6)
# test several-step reduction
an = [a1, a2]
bq = [b3, b4, a2 + 1]
ap = [a3, a4, b2 - 1]
bm = [b1, b2 + 1]
niq, ops = reduce_order_meijer(G_Function(an, ap, bm, bq))
assert niq.an == (a1,)
assert set(niq.ap) == {a3, a4}
assert niq.bm == (b1,)
assert set(niq.bq) == {b3, b4}
assert tn(apply_operators(g, ops, op), meijerg(an, ap, bm, bq, z), z)
def test_meijerg_shift_operators():
# carefully set up the parameters. XXX this still fails sometimes
a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = (randcplx(n) for n in range(10))
g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)
assert tn(MeijerShiftA(b1).apply(g, op),
meijerg([a1], [a3, a4], [b1 + 1], [b3, b4], z), z)
assert tn(MeijerShiftB(a1).apply(g, op),
meijerg([a1 - 1], [a3, a4], [b1], [b3, b4], z), z)
assert tn(MeijerShiftC(b3).apply(g, op),
meijerg([a1], [a3, a4], [b1], [b3 + 1, b4], z), z)
assert tn(MeijerShiftD(a3).apply(g, op),
meijerg([a1], [a3 - 1, a4], [b1], [b3, b4], z), z)
s = MeijerUnShiftA([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3, a4], [b1 - 1], [b3, b4], z), z)
s = MeijerUnShiftC([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3, a4], [b1], [b3 - 1, b4], z), z)
s = MeijerUnShiftB([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1 + 1], [a3, a4], [b1], [b3, b4], z), z)
s = MeijerUnShiftD([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3 + 1, a4], [b1], [b3, b4], z), z)
@slow
def test_meijerg_confluence():
def t(m, a, b):
from sympy import sympify
a, b = sympify([a, b])
m_ = m
m = hyperexpand(m)
if not m == Piecewise((a, abs(z) < 1), (b, abs(1/z) < 1), (m_, True)):
return False
if not (m.args[0].args[0] == a and m.args[1].args[0] == b):
return False
z0 = randcplx()/10
if abs(m.subs(z, z0).n() - a.subs(z, z0).n()).n() > 1e-10:
return False
if abs(m.subs(z, 1/z0).n() - b.subs(z, 1/z0).n()).n() > 1e-10:
return False
return True
assert t(meijerg([], [1, 1], [0, 0], [], z), -log(z), 0)
assert t(meijerg(
[], [3, 1], [0, 0], [], z), -z**2/4 + z - log(z)/2 - Rational(3, 4), 0)
assert t(meijerg([], [3, 1], [-1, 0], [], z),
z**2/12 - z/2 + log(z)/2 + Rational(1, 4) + 1/(6*z), 0)
assert t(meijerg([], [1, 1, 1, 1], [0, 0, 0, 0], [], z), -log(z)**3/6, 0)
assert t(meijerg([1, 1], [], [], [0, 0], z), 0, -log(1/z))
assert t(meijerg([1, 1], [2, 2], [1, 1], [0, 0], z),
-z*log(z) + 2*z, -log(1/z) + 2)
assert t(meijerg([S.Half], [1, 1], [0, 0], [Rational(3, 2)], z), log(z)/2 - 1, 0)
def u(an, ap, bm, bq):
m = meijerg(an, ap, bm, bq, z)
m2 = hyperexpand(m, allow_hyper=True)
if m2.has(meijerg) and not (m2.is_Piecewise and len(m2.args) == 3):
return False
return tn(m, m2, z)
assert u([], [1], [0, 0], [])
assert u([1, 1], [], [], [0])
assert u([1, 1], [2, 2, 5], [1, 1, 6], [0, 0])
assert u([1, 1], [2, 2, 5], [1, 1, 6], [0])
def test_meijerg_with_Floats():
# see issue #10681
from sympy import RR
f = meijerg(((3.0, 1), ()), ((Rational(3, 2),), (0,)), z)
a = -2.3632718012073
g = a*z**Rational(3, 2)*hyper((-0.5, Rational(3, 2)), (Rational(5, 2),), z*exp_polar(I*pi))
assert RR.almosteq((hyperexpand(f)/g).n(), 1.0, 1e-12)
def test_lerchphi():
from sympy import gammasimp, polylog, lerchphi
assert hyperexpand(hyper([1, a], [a + 1], z)/a) == lerchphi(z, 1, a)
assert hyperexpand(
hyper([1, a, a], [a + 1, a + 1], z)/a**2) == lerchphi(z, 2, a)
assert hyperexpand(hyper([1, a, a, a], [a + 1, a + 1, a + 1], z)/a**3) == \
lerchphi(z, 3, a)
assert hyperexpand(hyper([1] + [a]*10, [a + 1]*10, z)/a**10) == \
lerchphi(z, 10, a)
assert gammasimp(hyperexpand(meijerg([0, 1 - a], [], [0],
[-a], exp_polar(-I*pi)*z))) == lerchphi(z, 1, a)
assert gammasimp(hyperexpand(meijerg([0, 1 - a, 1 - a], [], [0],
[-a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 2, a)
assert gammasimp(hyperexpand(meijerg([0, 1 - a, 1 - a, 1 - a], [], [0],
[-a, -a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 3, a)
assert hyperexpand(z*hyper([1, 1], [2], z)) == -log(1 + -z)
assert hyperexpand(z*hyper([1, 1, 1], [2, 2], z)) == polylog(2, z)
assert hyperexpand(z*hyper([1, 1, 1, 1], [2, 2, 2], z)) == polylog(3, z)
assert hyperexpand(hyper([1, a, 1 + S.Half], [a + 1, S.Half], z)) == \
-2*a/(z - 1) + (-2*a**2 + a)*lerchphi(z, 1, a)
# Now numerical tests. These make sure reductions etc are carried out
# correctly
# a rational function (polylog at negative integer order)
assert can_do([2, 2, 2], [1, 1])
# NOTE these contain log(1-x) etc ... better make sure we have |z| < 1
# reduction of order for polylog
assert can_do([1, 1, 1, b + 5], [2, 2, b], div=10)
# reduction of order for lerchphi
# XXX lerchphi in mpmath is flaky
assert can_do(
[1, a, a, a, b + 5], [a + 1, a + 1, a + 1, b], numerical=False)
# test a bug
from sympy import Abs
assert hyperexpand(hyper([S.Half, S.Half, S.Half, 1],
[Rational(3, 2), Rational(3, 2), Rational(3, 2)], Rational(1, 4))) == \
Abs(-polylog(3, exp_polar(I*pi)/2) + polylog(3, S.Half))
def test_partial_simp():
# First test that hypergeometric function formulae work.
a, b, c, d, e = (randcplx() for _ in range(5))
for func in [Hyper_Function([a, b, c], [d, e]),
Hyper_Function([], [a, b, c, d, e])]:
f = build_hypergeometric_formula(func)
z = f.z
assert f.closed_form == func(z)
deriv1 = f.B.diff(z)*z
deriv2 = f.M*f.B
for func1, func2 in zip(deriv1, deriv2):
assert tn(func1, func2, z)
# Now test that formulae are partially simplified.
a, b, z = symbols('a b z')
assert hyperexpand(hyper([3, a], [1, b], z)) == \
(-a*b/2 + a*z/2 + 2*a)*hyper([a + 1], [b], z) \
+ (a*b/2 - 2*a + 1)*hyper([a], [b], z)
assert tn(
hyperexpand(hyper([3, d], [1, e], z)), hyper([3, d], [1, e], z), z)
assert hyperexpand(hyper([3], [1, a, b], z)) == \
hyper((), (a, b), z) \
+ z*hyper((), (a + 1, b), z)/(2*a) \
- z*(b - 4)*hyper((), (a + 1, b + 1), z)/(2*a*b)
assert tn(
hyperexpand(hyper([3], [1, d, e], z)), hyper([3], [1, d, e], z), z)
def test_hyperexpand_special():
assert hyperexpand(hyper([a, b], [c], 1)) == \
gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
assert hyperexpand(hyper([a, b], [1 + a - b], -1)) == \
gamma(1 + a/2)*gamma(1 + a - b)/gamma(1 + a)/gamma(1 + a/2 - b)
assert hyperexpand(hyper([a, b], [1 + b - a], -1)) == \
gamma(1 + b/2)*gamma(1 + b - a)/gamma(1 + b)/gamma(1 + b/2 - a)
assert hyperexpand(meijerg([1 - z - a/2], [1 - z + a/2], [b/2], [-b/2], 1)) == \
gamma(1 - 2*z)*gamma(z + a/2 + b/2)/gamma(1 - z + a/2 - b/2) \
/gamma(1 - z - a/2 + b/2)/gamma(1 - z + a/2 + b/2)
assert hyperexpand(hyper([a], [b], 0)) == 1
assert hyper([a], [b], 0) != 0
def test_Mod1_behavior():
from sympy import Symbol, simplify
n = Symbol('n', integer=True)
# Note: this should not hang.
assert simplify(hyperexpand(meijerg([1], [], [n + 1], [0], z))) == \
lowergamma(n + 1, z)
@slow
def test_prudnikov_misc():
assert can_do([1, (3 + I)/2, (3 - I)/2], [Rational(3, 2), 2])
assert can_do([S.Half, a - 1], [Rational(3, 2), a + 1], lowerplane=True)
assert can_do([], [b + 1])
assert can_do([a], [a - 1, b + 1])
assert can_do([a], [a - S.Half, 2*a])
assert can_do([a], [a - S.Half, 2*a + 1])
assert can_do([a], [a - S.Half, 2*a - 1])
assert can_do([a], [a + S.Half, 2*a])
assert can_do([a], [a + S.Half, 2*a + 1])
assert can_do([a], [a + S.Half, 2*a - 1])
assert can_do([S.Half], [b, 2 - b])
assert can_do([S.Half], [b, 3 - b])
assert can_do([1], [2, b])
assert can_do([a, a + S.Half], [2*a, b, 2*a - b + 1])
assert can_do([a, a + S.Half], [S.Half, 2*a, 2*a + S.Half])
assert can_do([a], [a + 1], lowerplane=True) # lowergamma
def test_prudnikov_1():
# A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
# Integrals and Series: More Special Functions, Vol. 3,.
# Gordon and Breach Science Publisher
# 7.3.1
assert can_do([a, -a], [S.Half])
assert can_do([a, 1 - a], [S.Half])
assert can_do([a, 1 - a], [Rational(3, 2)])
assert can_do([a, 2 - a], [S.Half])
assert can_do([a, 2 - a], [Rational(3, 2)])
assert can_do([a, 2 - a], [Rational(3, 2)])
assert can_do([a, a + S.Half], [2*a - 1])
assert can_do([a, a + S.Half], [2*a])
assert can_do([a, a + S.Half], [2*a + 1])
assert can_do([a, a + S.Half], [S.Half])
assert can_do([a, a + S.Half], [Rational(3, 2)])
assert can_do([a, a/2 + 1], [a/2])
assert can_do([1, b], [2])
assert can_do([1, b], [b + 1], numerical=False) # Lerch Phi
# NOTE: branches are complicated for |z| > 1
assert can_do([a], [2*a])
assert can_do([a], [2*a + 1])
assert can_do([a], [2*a - 1])
@slow
def test_prudnikov_2():
h = S.Half
assert can_do([-h, -h], [h])
assert can_do([-h, h], [3*h])
assert can_do([-h, h], [5*h])
assert can_do([-h, h], [7*h])
assert can_do([-h, 1], [h])
for p in [-h, h]:
for n in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:
for m in [-h, h, 3*h, 5*h, 7*h]:
assert can_do([p, n], [m])
for n in [1, 2, 3, 4]:
for m in [1, 2, 3, 4]:
assert can_do([p, n], [m])
@slow
def test_prudnikov_3():
if ON_TRAVIS:
# See https://github.com/sympy/sympy/pull/12795
skip("Too slow for travis.")
h = S.Half
assert can_do([Rational(1, 4), Rational(3, 4)], [h])
assert can_do([Rational(1, 4), Rational(3, 4)], [3*h])
assert can_do([Rational(1, 3), Rational(2, 3)], [3*h])
assert can_do([Rational(3, 4), Rational(5, 4)], [h])
assert can_do([Rational(3, 4), Rational(5, 4)], [3*h])
for p in [1, 2, 3, 4]:
for n in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4, 9*h]:
for m in [1, 3*h, 2, 5*h, 3, 7*h, 4]:
assert can_do([p, m], [n])
@slow
def test_prudnikov_4():
h = S.Half
for p in [3*h, 5*h, 7*h]:
for n in [-h, h, 3*h, 5*h, 7*h]:
for m in [3*h, 2, 5*h, 3, 7*h, 4]:
assert can_do([p, m], [n])
for n in [1, 2, 3, 4]:
for m in [2, 3, 4]:
assert can_do([p, m], [n])
@slow
def test_prudnikov_5():
h = S.Half
for p in [1, 2, 3]:
for q in range(p, 4):
for r in [1, 2, 3]:
for s in range(r, 4):
assert can_do([-h, p, q], [r, s])
for p in [h, 1, 3*h, 2, 5*h, 3]:
for q in [h, 3*h, 5*h]:
for r in [h, 3*h, 5*h]:
for s in [h, 3*h, 5*h]:
if s <= q and s <= r:
assert can_do([-h, p, q], [r, s])
for p in [h, 1, 3*h, 2, 5*h, 3]:
for q in [1, 2, 3]:
for r in [h, 3*h, 5*h]:
for s in [1, 2, 3]:
assert can_do([-h, p, q], [r, s])
@slow
def test_prudnikov_6():
h = S.Half
for m in [3*h, 5*h]:
for n in [1, 2, 3]:
for q in [h, 1, 2]:
for p in [1, 2, 3]:
assert can_do([h, q, p], [m, n])
for q in [1, 2, 3]:
for p in [3*h, 5*h]:
assert can_do([h, q, p], [m, n])
for q in [1, 2]:
for p in [1, 2, 3]:
for m in [1, 2, 3]:
for n in [1, 2, 3]:
assert can_do([h, q, p], [m, n])
assert can_do([h, h, 5*h], [3*h, 3*h])
assert can_do([h, 1, 5*h], [3*h, 3*h])
assert can_do([h, 2, 2], [1, 3])
# pages 435 to 457 contain more PFDD and stuff like this
@slow
def test_prudnikov_7():
assert can_do([3], [6])
h = S.Half
for n in [h, 3*h, 5*h, 7*h]:
assert can_do([-h], [n])
for m in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]: # HERE
for n in [-h, h, 3*h, 5*h, 7*h, 1, 2, 3, 4]:
assert can_do([m], [n])
@slow
def test_prudnikov_8():
h = S.Half
# 7.12.2
for ai in [1, 2, 3]:
for bi in [1, 2, 3]:
for ci in range(1, ai + 1):
for di in [h, 1, 3*h, 2, 5*h, 3]:
assert can_do([ai, bi], [ci, di])
for bi in [3*h, 5*h]:
for ci in [h, 1, 3*h, 2, 5*h, 3]:
for di in [1, 2, 3]:
assert can_do([ai, bi], [ci, di])
for ai in [-h, h, 3*h, 5*h]:
for bi in [1, 2, 3]:
for ci in [h, 1, 3*h, 2, 5*h, 3]:
for di in [1, 2, 3]:
assert can_do([ai, bi], [ci, di])
for bi in [h, 3*h, 5*h]:
for ci in [h, 3*h, 5*h, 3]:
for di in [h, 1, 3*h, 2, 5*h, 3]:
if ci <= bi:
assert can_do([ai, bi], [ci, di])
def test_prudnikov_9():
# 7.13.1 [we have a general formula ... so this is a bit pointless]
for i in range(9):
assert can_do([], [(S(i) + 1)/2])
for i in range(5):
assert can_do([], [-(2*S(i) + 1)/2])
@slow
def test_prudnikov_10():
# 7.14.2
h = S.Half
for p in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:
for m in [1, 2, 3, 4]:
for n in range(m, 5):
assert can_do([p], [m, n])
for p in [1, 2, 3, 4]:
for n in [h, 3*h, 5*h, 7*h]:
for m in [1, 2, 3, 4]:
assert can_do([p], [n, m])
for p in [3*h, 5*h, 7*h]:
for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
assert can_do([p], [h, m])
assert can_do([p], [3*h, m])
for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
assert can_do([7*h], [5*h, m])
assert can_do([Rational(-1, 2)], [S.Half, S.Half]) # shine-integral shi
def test_prudnikov_11():
# 7.15
assert can_do([a, a + S.Half], [2*a, b, 2*a - b])
assert can_do([a, a + S.Half], [Rational(3, 2), 2*a, 2*a - S.Half])
assert can_do([Rational(1, 4), Rational(3, 4)], [S.Half, S.Half, 1])
assert can_do([Rational(5, 4), Rational(3, 4)], [Rational(3, 2), S.Half, 2])
assert can_do([Rational(5, 4), Rational(3, 4)], [Rational(3, 2), Rational(3, 2), 1])
assert can_do([Rational(5, 4), Rational(7, 4)], [Rational(3, 2), Rational(5, 2), 2])
assert can_do([1, 1], [Rational(3, 2), 2, 2]) # cosh-integral chi
def test_prudnikov_12():
# 7.16
assert can_do(
[], [a, a + S.Half, 2*a], False) # branches only agree for some z!
    assert can_do([], [a, a + S.Half, 2*a + 1], False)  # ditto
assert can_do([], [S.Half, a, a + S.Half])
assert can_do([], [Rational(3, 2), a, a + S.Half])
assert can_do([], [Rational(1, 4), S.Half, Rational(3, 4)])
assert can_do([], [S.Half, S.Half, 1])
assert can_do([], [S.Half, Rational(3, 2), 1])
assert can_do([], [Rational(3, 4), Rational(3, 2), Rational(5, 4)])
assert can_do([], [1, 1, Rational(3, 2)])
assert can_do([], [1, 2, Rational(3, 2)])
assert can_do([], [1, Rational(3, 2), Rational(3, 2)])
assert can_do([], [Rational(5, 4), Rational(3, 2), Rational(7, 4)])
assert can_do([], [2, Rational(3, 2), Rational(3, 2)])
@slow
def test_prudnikov_2F1():
h = S.Half
# Elliptic integrals
for p in [-h, h]:
for m in [h, 3*h, 5*h, 7*h]:
for n in [1, 2, 3, 4]:
assert can_do([p, m], [n])
@XFAIL
def test_prudnikov_fail_2F1():
assert can_do([a, b], [b + 1]) # incomplete beta function
assert can_do([-1, b], [c]) # Poly. also -2, -3 etc
# TODO polys
# Legendre functions:
assert can_do([a, b], [a + b + S.Half])
assert can_do([a, b], [a + b - S.Half])
assert can_do([a, b], [a + b + Rational(3, 2)])
assert can_do([a, b], [(a + b + 1)/2])
assert can_do([a, b], [(a + b)/2 + 1])
assert can_do([a, b], [a - b + 1])
assert can_do([a, b], [a - b + 2])
assert can_do([a, b], [2*b])
assert can_do([a, b], [S.Half])
assert can_do([a, b], [Rational(3, 2)])
assert can_do([a, 1 - a], [c])
assert can_do([a, 2 - a], [c])
assert can_do([a, 3 - a], [c])
assert can_do([a, a + S.Half], [c])
assert can_do([1, b], [c])
assert can_do([1, b], [Rational(3, 2)])
assert can_do([Rational(1, 4), Rational(3, 4)], [1])
# PFDD
o = S.One
assert can_do([o/8, 1], [o/8*9])
assert can_do([o/6, 1], [o/6*7])
assert can_do([o/6, 1], [o/6*13])
assert can_do([o/5, 1], [o/5*6])
assert can_do([o/5, 1], [o/5*11])
assert can_do([o/4, 1], [o/4*5])
assert can_do([o/4, 1], [o/4*9])
assert can_do([o/3, 1], [o/3*4])
assert can_do([o/3, 1], [o/3*7])
assert can_do([o/8*3, 1], [o/8*11])
assert can_do([o/5*2, 1], [o/5*7])
assert can_do([o/5*2, 1], [o/5*12])
assert can_do([o/5*3, 1], [o/5*8])
assert can_do([o/5*3, 1], [o/5*13])
assert can_do([o/8*5, 1], [o/8*13])
assert can_do([o/4*3, 1], [o/4*7])
assert can_do([o/4*3, 1], [o/4*11])
assert can_do([o/3*2, 1], [o/3*5])
assert can_do([o/3*2, 1], [o/3*8])
assert can_do([o/5*4, 1], [o/5*9])
assert can_do([o/5*4, 1], [o/5*14])
assert can_do([o/6*5, 1], [o/6*11])
assert can_do([o/6*5, 1], [o/6*17])
assert can_do([o/8*7, 1], [o/8*15])
@XFAIL
def test_prudnikov_fail_3F2():
assert can_do([a, a + Rational(1, 3), a + Rational(2, 3)], [Rational(1, 3), Rational(2, 3)])
assert can_do([a, a + Rational(1, 3), a + Rational(2, 3)], [Rational(2, 3), Rational(4, 3)])
assert can_do([a, a + Rational(1, 3), a + Rational(2, 3)], [Rational(4, 3), Rational(5, 3)])
# page 421
assert can_do([a, a + Rational(1, 3), a + Rational(2, 3)], [a*Rational(3, 2), (3*a + 1)/2])
# pages 422 ...
assert can_do([Rational(-1, 2), S.Half, S.Half], [1, 1]) # elliptic integrals
assert can_do([Rational(-1, 2), S.Half, 1], [Rational(3, 2), Rational(3, 2)])
# TODO LOTS more
# PFDD
assert can_do([Rational(1, 8), Rational(3, 8), 1], [Rational(9, 8), Rational(11, 8)])
assert can_do([Rational(1, 8), Rational(5, 8), 1], [Rational(9, 8), Rational(13, 8)])
assert can_do([Rational(1, 8), Rational(7, 8), 1], [Rational(9, 8), Rational(15, 8)])
assert can_do([Rational(1, 6), Rational(1, 3), 1], [Rational(7, 6), Rational(4, 3)])
assert can_do([Rational(1, 6), Rational(2, 3), 1], [Rational(7, 6), Rational(5, 3)])
assert can_do([Rational(1, 6), Rational(2, 3), 1], [Rational(5, 3), Rational(13, 6)])
assert can_do([S.Half, 1, 1], [Rational(1, 4), Rational(3, 4)])
# LOTS more
@XFAIL
def test_prudnikov_fail_other():
# 7.11.2
# 7.12.1
assert can_do([1, a], [b, 1 - 2*a + b]) # ???
# 7.14.2
assert can_do([Rational(-1, 2)], [S.Half, 1]) # struve
assert can_do([1], [S.Half, S.Half]) # struve
assert can_do([Rational(1, 4)], [S.Half, Rational(5, 4)]) # PFDD
assert can_do([Rational(3, 4)], [Rational(3, 2), Rational(7, 4)]) # PFDD
assert can_do([1], [Rational(1, 4), Rational(3, 4)]) # PFDD
assert can_do([1], [Rational(3, 4), Rational(5, 4)]) # PFDD
assert can_do([1], [Rational(5, 4), Rational(7, 4)]) # PFDD
# TODO LOTS more
# 7.15.2
assert can_do([S.Half, 1], [Rational(3, 4), Rational(5, 4), Rational(3, 2)]) # PFDD
assert can_do([S.Half, 1], [Rational(7, 4), Rational(5, 4), Rational(3, 2)]) # PFDD
# 7.16.1
    assert can_do([], [Rational(1, 3), Rational(2, 3)])  # PFDD
    assert can_do([], [Rational(2, 3), Rational(4, 3)])  # PFDD
    assert can_do([], [Rational(5, 3), Rational(4, 3)])  # PFDD
# XXX this does not *evaluate* right??
assert can_do([], [a, a + S.Half, 2*a - 1])
def test_bug():
h = hyper([-1, 1], [z], -1)
assert hyperexpand(h) == (z + 1)/z
def test_omgissue_203():
h = hyper((-5, -3, -4), (-6, -6), 1)
assert hyperexpand(h) == Rational(1, 30)
h = hyper((-6, -7, -5), (-6, -6), 1)
assert hyperexpand(h) == Rational(-1, 6)
|
py | 1a38fff800e873eaee697160600e2451b299544f | #!python
from bbfreeze import Freezer
import shutil
destDir = 'dist'
def main():
#includes = ['requests', 'email.utils']
includes = ['requests', 'email.utils']
excludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
            'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl', 'tk',
            'Tkconstants', 'Tkinter',]
frz = Freezer(destDir, includes=includes, excludes=excludes)
#frz.addScript("meteor.py", gui_only=True)
frz.addScript("play_development.py")
frz.addScript("play_fullscreen.py", gui_only=True)
frz.addScript("play_windowed.py", gui_only=True)
#frz.addScript("gameassets.py")
#frz.addScript("geoip.py")
#frz.addScript("shipsprite.py")
#frz.addScript("sprites.py")
#frz.addScript("timevars.py")
#frz.addScript("vector.py")
frz.use_compression = 0
frz.include_py = True
frz()
addFile('config.json')
addFile('avbin.dll')
#addDir('images')
#addDir('fonts')
#addDir('sounds')
addDir('themes')
def addFile(f):
# Add a non-script file to directory.
# Why this isn't part of bbfreeze beats me
# Currently assumes file is in script directory. That's lazy but all
# I need for now.
d = "%s/%s" % (destDir, f)
shutil.copyfile( f, d)
def addDir(d):
dd = "%s/%s" % (destDir, d)
shutil.copytree( d, dd)
main()
|
py | 1a390043d2556e0880cdcb63695dbcd97e7a5231 | import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
import asyncio
import aiohttp
import json
import os
import shutil
import time
from PIL import Image
from datetime import datetime
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from translation import Translation
from database.database import db
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from helper_funcs.display_progress import progress_for_pyrogram, humanbytes, TimeFormatter
async def ddl_call_back(bot, update):
cb_data = update.data
tg_send_type, youtube_dl_format, youtube_dl_ext = cb_data.split("=")
thumb_image_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + ".jpg"
youtube_dl_url = update.message.reply_to_message.text
custom_file_name = os.path.basename(youtube_dl_url)
if "|" in youtube_dl_url:
url_parts = youtube_dl_url.split("|")
if len(url_parts) == 2:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
if youtube_dl_url is not None:
youtube_dl_url = youtube_dl_url.strip()
if custom_file_name is not None:
custom_file_name = custom_file_name.strip()
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
description = Translation.CUSTOM_CAPTION_UL_FILE
start = datetime.now()
await bot.edit_message_text(
text=Translation.DOWNLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
tmp_directory_for_each_user = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id)
if not os.path.isdir(tmp_directory_for_each_user):
os.makedirs(tmp_directory_for_each_user)
download_directory = tmp_directory_for_each_user + "/" + custom_file_name
command_to_exec = []
async with aiohttp.ClientSession() as session:
c_time = time.time()
try:
await download_coroutine(
bot,
session,
youtube_dl_url,
download_directory,
update.message.chat.id,
update.message.message_id,
c_time
)
        except asyncio.TimeoutError:
await bot.edit_message_text(
text=Translation.SLOW_URL_DECED,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
return False
if os.path.exists(download_directory):
end_one = datetime.now()
await bot.edit_message_text(
text=Translation.UPLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
file_size = Config.TG_MAX_FILE_SIZE + 1
try:
file_size = os.stat(download_directory).st_size
except FileNotFoundError as exc:
download_directory = os.path.splitext(download_directory)[0] + "." + "mkv"
file_size = os.stat(download_directory).st_size
if file_size > Config.TG_MAX_FILE_SIZE:
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.RCHD_TG_API_LIMIT,
message_id=update.message.message_id
)
else:
# get the correct width, height, and duration for videos greater than 10MB
width = 0
height = 0
duration = 0
if tg_send_type != "file":
metadata = extractMetadata(createParser(download_directory))
if metadata is not None:
if metadata.has("duration"):
duration = metadata.get('duration').seconds
if os.path.exists(thumb_image_path):
width = 0
height = 0
metadata = extractMetadata(createParser(thumb_image_path))
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
if tg_send_type == "vm":
height = width
Image.open(thumb_image_path).convert(
"RGB").save(thumb_image_path)
img = Image.open(thumb_image_path)
if tg_send_type == "file":
img.resize((320, height))
else:
img.resize((90, height))
img.save(thumb_image_path, "JPEG")
else:
thumb_image_path = None
start_time = time.time()
if (await db.get_upload_as_doc(update.from_user.id)) is False:
thumbnail = await Gthumb01(bot, update)
await bot.send_document(
chat_id=update.message.chat.id,
document=download_directory,
thumb=thumbnail,
caption=description,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
width, height, duration = await Mdata01(download_directory)
thumb_image_path = await Gthumb02(bot, update, duration, download_directory)
await bot.send_video(
chat_id=update.message.chat.id,
video=download_directory,
caption=description,
duration=duration,
width=width,
height=height,
supports_streaming=True,
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
if tg_send_type == "audio":
duration = await Mdata03(download_directory)
thumbnail = await Gthumb01(bot, update)
await bot.send_audio(
chat_id=update.message.chat.id,
audio=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
thumb=thumbnail,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "vm":
width, duration = await Mdata02(download_directory)
thumbnail = await Gthumb02(bot, update, duration, download_directory)
await bot.send_video_note(
chat_id=update.message.chat.id,
video_note=download_directory,
duration=duration,
length=width,
thumb=thumbnail,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
logger.info("Did this happen? :\\")
end_two = datetime.now()
try:
os.remove(download_directory)
os.remove(thumb_image_path)
except:
pass
time_taken_for_download = (end_one - start).seconds
time_taken_for_upload = (end_two - end_one).seconds
await bot.edit_message_text(
text=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(time_taken_for_download, time_taken_for_upload),
chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_web_page_preview=True
)
else:
await bot.edit_message_text(
text=Translation.NO_VOID_FORMAT_FOUND.format("Incorrect Link"),
chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_web_page_preview=True
)
async def download_coroutine(bot, session, url, file_name, chat_id, message_id, start):
downloaded = 0
display_message = ""
async with session.get(url, timeout=Config.PROCESS_MAX_TIMEOUT) as response:
total_length = int(response.headers["Content-Length"])
content_type = response.headers["Content-Type"]
if "text" in content_type and total_length < 500:
return await response.release()
await bot.edit_message_text(
chat_id,
message_id,
text="""Initiating Download
URL: {}
File Size: {}""".format(url, humanbytes(total_length))
)
with open(file_name, "wb") as f_handle:
while True:
chunk = await response.content.read(Config.CHUNK_SIZE)
if not chunk:
break
f_handle.write(chunk)
downloaded += Config.CHUNK_SIZE
now = time.time()
diff = now - start
if round(diff % 5.00) == 0 or downloaded == total_length:
percentage = downloaded * 100 / total_length
speed = downloaded / diff
elapsed_time = round(diff) * 1000
time_to_completion = round(
(total_length - downloaded) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
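                    # Note (descriptive): `diff` is in seconds, so the * 1000
                    # factors express elapsed and remaining time in milliseconds,
                    # which TimeFormatter() below appears to expect. `percentage`
                    # is computed but not currently shown in the status message.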
try:
current_message = """**Download Status**
URL: {}
File Size: {}
Downloaded: {}
ETA: {}""".format(
url,
humanbytes(total_length),
humanbytes(downloaded),
TimeFormatter(estimated_total_time)
)
if current_message != display_message:
await bot.edit_message_text(
chat_id,
message_id,
text=current_message
)
display_message = current_message
except Exception as e:
logger.info(str(e))
pass
return await response.release()
|
py | 1a390124f88e18ca8519dadcafc9d51128af8dd5 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.0.1"
|
py | 1a390127c9e8d07b13c42ff7893a715548db6f52 | # Copyright 2015 - Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import wsme
from wsme import types as wtypes
from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import types as api_types
class Workflow(wtypes.Base):
"""Representation of a Workflow.
A workflow maintains a living creation and deployment of an App.
"""
# (devkulkarni) Added base_url to get around strict validation
# checking of WSME 0.8.0
# https://bugs.launchpad.net/solum/+bug/1491504
# https://bugs.launchpad.net/solum/+bug/1491499
base_url = common_types.Uri
"URI of the base resource."
uri = common_types.Uri
"URI to the resource."
uuid = wtypes.text
"Unique Identifier of the resource"
type = wtypes.text
"The resource type."
id = wtypes.text
updated_at = datetime.datetime
created_at = datetime.datetime
app_id = wtypes.text
wf_id = int
source = wtypes.DictType(wtypes.text, api_types.MultiType(
wtypes.text,
int,
bool,
float))
config = {wtypes.text: wtypes.text}
actions = [wtypes.text]
du_id = wtypes.text
status = wtypes.text
result = wtypes.text
scale_target = int
def __init__(self, *args, **kwargs):
super(Workflow, self).__init__(*args, **kwargs)
@classmethod
def sample(cls):
return cls(
wf_id=1,
config={},
actions={},
source={},
status=''
)
@classmethod
def from_db_model(cls, m, host_url):
json = m.as_dict()
json['type'] = m.__tablename__
json['uri'] = ''
json['uri'] = ('%s/v1/apps/%s/workflows/%s' %
(host_url, m.app_id, m.wf_id))
return cls(**(json))
def as_dict_from_keys(self, keys):
return dict((k, getattr(self, k))
for k in keys
if hasattr(self, k) and
getattr(self, k) != wsme.Unset)
def as_dict(self, db_model):
valid_keys = [attr for attr in db_model.__dict__.keys()
if attr[:2] != '__' and attr != 'as_dict']
base = self.as_dict_from_keys(valid_keys)
attrs = [
'id',
'app_id',
'wf_id',
'source',
'config',
'actions',
'status',
'result',
'scale_target'
]
for a in attrs:
if getattr(self, a) is wsme.Unset:
continue
if getattr(self, a) is None:
continue
base[a] = getattr(self, a)
return base
|
py | 1a3901cfaedaafa5fc15125b9f339db27238b454 | import os
DOCKER_HOST = os.getenv('DOCKER_HOST')
HOSTNAME = os.getenv('HOSTNAME')
|
py | 1a39023db9177ea076ebdbe744322fab670b778c | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Dirk Chang and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import redis
from frappe.model.document import Document
class IOTUserApi(Document):
pass
|
py | 1a3902c409c954b4906efdb2320bde7234fdbfdb | # future
from __future__ import annotations
# stdlib
import functools
from functools import lru_cache
import operator
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
import numpy as np
import torch
# syft absolute
# absolute
import syft as sy
# relative
from . import utils
from .... import logger
from ....proto.core.tensor.share_tensor_pb2 import ShareTensor as ShareTensor_PB
from ...common.serde.deserialize import _deserialize as deserialize
from ...common.serde.serializable import serializable
from ...common.serde.serialize import _serialize as serialize
from ...smpc.store.crypto_store import CryptoStore
from ..passthrough import PassthroughTensor # type: ignore
from .party import Party
METHODS_FORWARD_ALL_SHARES = {
"repeat",
"copy",
"diagonal",
"flatten",
"transpose",
"partition",
"resize",
"ravel",
"compress",
"reshape",
"squeeze",
"swapaxes",
"__pos__",
"__neg__",
"take",
"choose",
}
INPLACE_OPS = {"resize", "put"}
RING_SIZE_TO_OP = {
2: {
"add": operator.xor,
"sub": operator.xor,
"mul": operator.and_,
"lt": operator.lt,
"gt": operator.gt,
"ge": operator.ge,
"le": operator.le,
"eq": operator.eq,
"ne": operator.ne,
},
2
** 32: {
"add": operator.add,
"sub": operator.sub,
"mul": operator.mul,
"lt": operator.lt,
"gt": operator.gt,
"ge": operator.ge,
"le": operator.le,
"eq": operator.eq,
"ne": operator.ne,
},
}
CACHE_CLIENTS: Dict[Party, Any] = {}
def populate_store(*args: List[Any], **kwargs: Dict[Any, Any]) -> None:
ShareTensor.crypto_store.populate_store(*args, **kwargs) # type: ignore
@serializable()
class ShareTensor(PassthroughTensor):
crypto_store = CryptoStore()
__slots__ = (
"rank",
"ring_size",
"clients", # clients connections
"min_value",
"max_value",
"generator_przs",
# Only ShareTensors with seed_przs could be sent over the wire
"seed_przs",
"parties_info",
"nr_parties",
)
def __init__(
self,
rank: int,
parties_info: List[Party],
ring_size: int,
seed_przs: int = 42,
clients: Optional[List[Any]] = None,
value: Optional[Any] = None,
init_clients: bool = False,
) -> None:
# TODO: Ring size needs to be changed to 2^64 (or other specific sizes)
self.rank = rank
self.ring_size = ring_size
self.nr_parties = len(parties_info)
self.parties_info = parties_info
self.clients = []
if clients is not None:
self.clients = clients
elif init_clients: # type: ignore
self.clients = ShareTensor.login_clients(parties_info)
self.min_value, self.max_value = ShareTensor.compute_min_max_from_ring(
self.ring_size
)
# This should be set only in the deserializer
self.generator_przs = None
self.seed_przs = seed_przs
super().__init__(value)
@staticmethod
def login_clients(parties_info: List[Party]) -> Any:
clients = []
for party_info in parties_info:
party_info.url = party_info.url.replace("localhost", "docker-host")
client = CACHE_CLIENTS.get(party_info, None)
if client is None:
# default cache to true, here to prevent multiple logins
# due to gevent monkey patching, context switch is done during
# during socket connection initialization.
CACHE_CLIENTS[party_info] = True
# TODO: refactor to use a guest account
client = sy.login( # nosec
url=party_info.url,
email="[email protected]",
password="changethis",
port=party_info.port,
verbose=False,
)
base_url = client.routes[0].connection.base_url
client.routes[0].connection.base_url = base_url.replace( # type: ignore
"localhost", "docker-host"
)
CACHE_CLIENTS[party_info] = client
clients.append(client)
return clients
def __getitem__(self, item: Union[str, int, slice]) -> ShareTensor:
return ShareTensor(
rank=self.rank,
parties_info=self.parties_info,
ring_size=self.ring_size,
value=self.child[item],
clients=self.clients,
)
def copy_tensor(self) -> ShareTensor:
return ShareTensor(
value=self.child,
rank=self.rank,
parties_info=self.parties_info,
ring_size=self.ring_size,
seed_przs=self.seed_przs,
clients=self.clients,
)
@staticmethod
@lru_cache(32)
def compute_min_max_from_ring(ring_size: int = 2 ** 32) -> Tuple[int, int]:
if ring_size == 2:
min_value, max_value = 0, 1
else:
min_value = (-ring_size) // 2
max_value = (ring_size) // 2 - 1
return min_value, max_value
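    # For example (values follow from the formula above): ring_size=2**32 yields
    # min_value=-2147483648 and max_value=2147483647 (signed 32-bit bounds),
    # while ring_size=2 yields the boolean ring bounds (0, 1).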
@staticmethod
@lru_cache(maxsize=None)
def get_op(ring_size: int, op_str: str) -> Callable[..., Any]:
"""Returns method attribute based on ring_size and op_str.
Args:
ring_size (int): Ring size
op_str (str): Operation string.
Returns:
op (Callable[...,Any]): The operation method for the op_str.
Raises:
ValueError : If invalid ring size or op_str is given as input.
"""
ops = RING_SIZE_TO_OP.get(ring_size, None)
if ops is None:
raise ValueError(f"Do not have operations for ring size {ring_size}")
op = ops.get(op_str, None)
if op is None:
raise ValueError(
f"Operator {op_str} does not exist for ring size {ring_size}"
)
return op
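    # Example usage (mappings taken from RING_SIZE_TO_OP defined above):
    #   ShareTensor.get_op(2 ** 32, "add") -> operator.add
    #   ShareTensor.get_op(2, "add")       -> operator.xor  (addition modulo 2)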
""" TODO: Remove this -- we would use generate_przs since the scenario we are testing is that
the secret is remotly
@staticmethod
def generate_shares(secret, nr_shares, ring_size=2 ** 64):
from .fixed_precision_tensor import FixedPrecisionTensor
if not isinstance(secret, (int, FixedPrecisionTensor)):
secret = FixedPrecisionTensor(value=secret)
shape = secret.shape
min_value, max_value = ShareTensor.compute_min_max_from_ring(ring_size)
generator_shares = np.random.default_rng()
random_shares = []
for i in range(nr_shares):
random_value = generator_shares.integers(
low=min_value, high=max_value, size=shape
)
fpt_value = FixedPrecisionTensor(value=random_value)
random_shares.append(fpt_value)
shares_fpt = []
for i in range(nr_shares):
if i == 0:
share = value = random_shares[i]
elif i < nr_shares - 1:
share = random_shares[i] - random_shares[i - 1]
else:
share = secret - random_shares[i - 1]
shares_fpt.append(share)
# Add the ShareTensor class between them
shares = []
for rank, share_fpt in enumerate(shares_fpt):
share_fpt.child = ShareTensor(rank=rank, value=share_fpt.child)
shares.append(share_fpt)
return shares
"""
@staticmethod
def generate_przs(
value: Any,
shape: Tuple[int, ...],
rank: int,
parties_info: List[Party],
ring_size: int = 2 ** 32,
seed_przs: Optional[int] = None,
generator_przs: Optional[Any] = None,
init_clients: bool = True,
) -> "ShareTensor":
nr_parties = len(parties_info)
# Try:
# 1. First get numpy type if secret is numpy and obtain ring size from there
# 2. If not get the type from the ring size
numpy_type = None
ring_size_final = None
ring_size_from_type = utils.TYPE_TO_RING_SIZE.get(
getattr(value, "dtype", None), None
)
if ring_size_from_type is None:
logger.warning("Could not get ring size from {value}")
else:
ring_size_final = ring_size_from_type
numpy_type = value.dtype
if numpy_type is None:
numpy_type = utils.RING_SIZE_TO_TYPE.get(ring_size, None)
ring_size_final = ring_size
if numpy_type is None:
raise ValueError(f"Ring size {ring_size} not known how to be treated")
# relative
from ..tensor import Tensor
if (seed_przs is None) == (generator_przs is None):
raise ValueError("Only seed_przs or generator should be populated")
if value is None:
value = Tensor(np.zeros(shape, dtype=numpy_type))
# TODO: Sending the seed and having each party generate the shares is not safe
        # since the parties would know some of the other parties' shares (this might not pose a risk
        # when shares are not sent between parties -- like private addition/subtraction -- but it
        # might for multiplication).
# The secret holder should generate the shares and send them to the other parties
if generator_przs:
generator_shares = generator_przs
else:
generator_shares = np.random.default_rng(seed_przs)
if isinstance(value.child, ShareTensor):
value = value.child
share = ShareTensor(
value=value.child,
rank=rank,
parties_info=parties_info,
seed_przs=seed_przs, # type: ignore #TODO:Inspect as we could pass none.
init_clients=init_clients,
ring_size=ring_size_final, # type: ignore
)
share.generator_przs = generator_shares
shares = [
generator_shares.integers(
low=share.min_value,
high=share.max_value,
size=shape,
endpoint=True,
dtype=numpy_type,
)
for _ in range(nr_parties)
]
op = ShareTensor.get_op(ring_size_final, "sub")
przs_share = op(shares[rank], shares[(rank + 1) % nr_parties])
share.child = op(share.child, przs_share)
return share
@staticmethod
def generate_przs_on_dp_tensor(
value: Optional[Any],
shape: Tuple[int],
rank: int,
parties_info: List[Party],
seed_przs: int,
share_wrapper: Any,
ring_size: int = 2 ** 32,
) -> PassthroughTensor:
if value is not None:
share = ShareTensor.generate_przs(
value=value.child,
shape=shape,
rank=rank,
parties_info=parties_info,
seed_przs=seed_przs,
ring_size=ring_size,
)
else:
share = ShareTensor.generate_przs(
value=value,
shape=shape,
rank=rank,
parties_info=parties_info,
seed_przs=seed_przs,
ring_size=ring_size,
)
share_wrapper.child.child = share
return share_wrapper
@staticmethod
def sanity_check(
share: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> None:
"""Check type for share
Args:
share (Union[int, float, ShareTensor, np.ndarray, torch.Tensor]): value to check
Raises:
ValueError: if type is not supported
"""
if isinstance(share, float):
raise ValueError("Type float not supported yet!")
if isinstance(share, np.ndarray) and (
not np.issubdtype(share.dtype, np.integer)
and share.dtype != np.dtype("bool")
):
raise ValueError(
f"NPArray should have type int or bool, but found {share.dtype}"
)
if isinstance(share, torch.Tensor) and torch.is_floating_point(share):
raise ValueError("Torch tensor should have type int, but found float")
def apply_function(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"], op_str: str
) -> "ShareTensor":
"""Apply a given operation.
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): tensor to apply the operator.
op_str (str): Operator.
Returns:
ShareTensor: Result of the operation.
"""
op = ShareTensor.get_op(self.ring_size, op_str)
numpy_type = utils.RING_SIZE_TO_TYPE.get(self.ring_size, None)
if numpy_type is None:
raise ValueError(f"Do not know numpy type for ring size {self.ring_size}")
print("=====================================================")
print("OP", op, numpy_type, self.ring_size)
print("====================================================")
if isinstance(y, ShareTensor):
utils.get_ring_size(self.ring_size, y.ring_size)
value = op(self.child, y.child)
else:
# TODO: Converting y to numpy because doing "numpy op torch tensor" raises exception
value = op(self.child, np.array(y, numpy_type)) # TODO: change to np.int64
res = self.copy_tensor()
res.child = value
return res
def add(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "add" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self + y
Returns:
ShareTensor. Result of the operation.
"""
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "add")
return new_share
def sub(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "sub" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self - y
Returns:
ShareTensor. Result of the operation.
"""
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "sub")
return new_share
def rsub(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "rsub" operation between "self" and "y"
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): y - self
Returns:
ShareTensor. Result of the operation.
"""
ShareTensor.sanity_check(y)
new_self = self.mul(-1)
new_share = new_self.apply_function(y, "add")
return new_share
def mul(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "mul" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self * y
Returns:
ShareTensor. Result of the operation.
"""
# if isinstance(y, ShareTensor):
# raise ValueError(
# "We should not reach this point for private multiplication. Only public one"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "mul")
return new_share
def matmul(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "matmul" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self @ y.
Returns:
ShareTensor: Result of the operation.
"""
if isinstance(y, ShareTensor):
raise ValueError("Private matmul not supported yet")
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "matmul")
return new_share
def rmatmul(self, y: torch.Tensor) -> "ShareTensor":
"""Apply the "rmatmul" operation between "y" and "self".
Args:
y (torch.Tensor): y @ self
Returns:
ShareTensor. Result of the operation.
"""
if isinstance(y, ShareTensor):
raise ValueError("Private matmul not supported yet")
ShareTensor.sanity_check(y)
new_share = y.apply_function(self, "matmul")
return new_share
def lt(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "lt" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self < y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "lt")
return new_share
def gt(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "gt" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self > y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "gt")
return new_share
def ge(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "ge" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self >= y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "ge")
return new_share
def le(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "le" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self <= y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "le")
return new_share
def ne(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "ne" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self != y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "ne")
return new_share
def bit_decomposition(self) -> "ShareTensor":
"""Apply the "decomposition" operation on self
Args:
None
Returns:
ShareTensor. Result of the operation.
"""
raise ValueError(
"It should not reach this point since we generate SMPCAction for this"
)
def eq(self, other: Any) -> bool:
"""Equal operator.
Check if "self" is equal with another object given a set of
attributes to compare.
Args:
other (Any): Value to compare.
Returns:
bool: True if equal False if not.
"""
# TODO: Rasswanth: Fix later after the comparison operation
# relative
# from .... import Tensor
# if (
# isinstance(self.child, Tensor)
# and isinstance(other.child, Tensor)
# and (self.child != other.child).child.any() # type: ignore
# ):
# return False
# if (
# isinstance(self.child, np.ndarray)
# and isinstance(other.child, np.ndarray)
# and (self.child != other.child).any()
# ):
# return False
# if self.rank != other.rank:
# return False
# if self.ring_size != other.ring_size:
# return False
# if self.nr_parties != other.nr_parties:
# return False
# return True
return self.child == other.child
    # TRASK: commenting out because ShareTensor doesn't appear to have .session_uuid or .config
# def div(
# self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
# ) -> "ShareTensor":
# """Apply the "div" operation between "self" and "y".
#
# Args:
# y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): Denominator.
#
# Returns:
# ShareTensor: Result of the operation.
#
# Raises:
# ValueError: If y is not an integer or LongTensor.
# """
# if not isinstance(y, (int, torch.LongTensor)):
# raise ValueError("Div works (for the moment) only with integers!")
#
# res = ShareTensor(session_uuid=self.session_uuid, config=self.config)
# # res = self.apply_function(y, "floordiv")
# res.tensor = self.tensor // y
# return res
def bit_extraction(self, pos: int = 0) -> ShareTensor:
"""Extracts the bit at the specified position.
Args:
pos (int): position to extract bit.
Returns:
ShareTensor : extracted bits at specific position.
Raises:
ValueError: If invalid position is provided.
"""
ring_bits = utils.get_nr_bits(self.ring_size)
if pos < 0 or pos > ring_bits - 1:
raise ValueError(
f"Invalid position for bit_extraction: {pos}, must be in range:[0,{ring_bits-1}]"
)
shape = self.shape
numpy_type = utils.RING_SIZE_TO_TYPE[self.ring_size]
# logical shift
bit_mask = np.ones(shape, dtype=numpy_type) << pos
value = self.child & bit_mask
value = value.astype(np.bool_)
share = self.copy_tensor()
share.child = value
return share
@staticmethod
def hook_method(__self: ShareTensor, method_name: str) -> Callable[..., Any]:
"""Hook a framework method.
Args:
method_name (str): method to hook
Returns:
A hooked method
"""
def method_all_shares(
_self: ShareTensor, *args: List[Any], **kwargs: Dict[Any, Any]
) -> Any:
share = _self.child
if method_name != "resize":
method = getattr(share, method_name)
else:
# Should be modified to remove copy
# https://stackoverflow.com/questions/23253144/numpy-the-array-doesnt-have-its-own-data
share = share.copy()
method = getattr(share, method_name)
if method_name not in INPLACE_OPS:
new_share = method(*args, **kwargs)
else:
method(*args, **kwargs)
new_share = share
res = _self.copy_tensor()
res.child = new_share
return res
return functools.partial(method_all_shares, __self)
def __getattribute__(self, attr_name: str) -> Any:
if attr_name in METHODS_FORWARD_ALL_SHARES or attr_name in INPLACE_OPS:
return ShareTensor.hook_method(self, attr_name)
return object.__getattribute__(self, attr_name)
def _object2proto(self) -> ShareTensor_PB:
proto_init_kwargs = {
"rank": self.rank,
"parties_info": [serialize(party) for party in self.parties_info],
"seed_przs": self.seed_przs,
"ring_size": sy.serialize(self.ring_size, to_bytes=True),
}
if isinstance(self.child, np.ndarray):
proto_init_kwargs["array"] = serialize(self.child)
elif isinstance(self.child, torch.Tensor):
proto_init_kwargs["array"] = serialize(np.array(self.child))
else:
proto_init_kwargs["tensor"] = serialize(self.child)
return ShareTensor_PB(**proto_init_kwargs)
@staticmethod
def _proto2object(proto: ShareTensor_PB) -> "ShareTensor":
init_kwargs = {
"rank": proto.rank,
"parties_info": [deserialize(party) for party in proto.parties_info],
"seed_przs": proto.seed_przs,
"ring_size": int(sy.deserialize(proto.ring_size, from_bytes=True)),
}
if proto.HasField("tensor"):
init_kwargs["value"] = deserialize(proto.tensor)
else:
init_kwargs["value"] = deserialize(proto.array)
# init_kwargs["init_clients"] = True
res = ShareTensor(**init_kwargs)
generator_przs = np.random.default_rng(proto.seed_przs)
res.generator_przs = generator_przs
return res
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
return ShareTensor_PB
__add__ = add
__radd__ = add
__sub__ = sub
__rsub__ = rsub
__mul__ = mul
__rmul__ = mul
__matmul__ = matmul
__rmatmul__ = rmatmul
__lt__ = lt
__gt__ = gt
__ge__ = ge
__le__ = le
__eq__ = eq
__ne__ = ne
|
py | 1a3902da02ccbb8942dddbccb06a34d1d1d59849 | from __future__ import division, print_function
import sys
import os
import glob
import re
import numpy as np
# Keras and pytorch
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import load_model
import torch
from torch.utils.data import Dataset, DataLoader
import torch.autograd as autograd
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.jit import script, trace
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
#flask app
app = Flask(__name__)
MODEL_PATH1 = 'decoder.pt'
MODEL_PATH2 = 'encoder.pt'
# Load your trained model
model1 = load_model(MODEL_PATH1)
model1._make_predict_function()
model2 = load_model(MODEL_PATH2)
model2._make_predict_function()
def model_predict(sentence, model1, model2):
sent = sentence.preprocess_input(model1)
x = model2.Tensor(sent)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x, mode='caffe')
    preds = model1.predict(x)
return preds
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
f = request.files['file']
basepath = os.path.dirname(__file__)
file_path = os.path.join(
basepath, 'uploads', secure_filename(f.filename))
f.save(file_path)
# Make prediction
        preds = model_predict(file_path, model1, model2)
# Simple argmax
pred_class = decode_predictions(preds, top=1)
result = str(pred_class[0][0][1])
return result
return None
if __name__ == '__main__':
app.run(debug=True) |
py | 1a3904503e5c74b051ee97bff06d3c27a66af127 | # qubit number=4
# total number=35
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=12
prog += X(3) # number=13
prog += H(3) # number=28
prog += CZ(0,3) # number=29
prog += H(3) # number=30
prog += Z(3) # number=10
prog += H(1) # number=2
prog += H(2) # number=3
prog += RX(2.708052867394402,1) # number=11
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += Y(2) # number=16
prog += CNOT(1,0) # number=19
prog += H(3) # number=25
prog += Z(1) # number=20
prog += CNOT(3,0) # number=32
prog += Z(3) # number=33
prog += CNOT(3,0) # number=34
prog += H(0) # number=22
prog += CZ(1,0) # number=23
prog += H(0) # number=24
prog += Z(2) # number=15
prog += H(2) # number=7
prog += H(3) # number=8
prog += Y(2) # number=18
prog += H(0) # number=9
prog += CNOT(1,0) # number=26
prog += CNOT(1,0) # number=27
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2652.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
|
py | 1a3904ae93abb7a214f34f33b13a03fc5efbeafc |
"""
This Python module contains not only the class Defensive Warrior, but also the test of
this Python class.
@contents : This module contains not only a single Python class, but also the
test cases to probe its functionality.
@project : N/A
@program : N/A
@file : defensivewarrior.py
@author : Antonio Artes Garcia ([email protected])
Alberto Gil De la Fuente ([email protected])
@version : 0.0.1, 04 January 2021
@information : The Zen of Python
https://www.python.org/dev/peps/pep-0020/
Style Guide for Python Code
https://www.python.org/dev/peps/pep-0008/
Example NumPy Style Python Docstrings
http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html
doctest – Testing through documentation
https://pymotw.com/2/doctest/
@copyright : Copyright 2021 GNU AFFERO GENERAL PUBLIC.
All rights are reserved. Reproduction in whole or in part is
prohibited without the written consent of the copyright owner.
"""
# Source packages.
from warrior_type import WarriorType
from weapon_type import WeaponType
class DefensiveWarrior():
"""Python class to implement a defensive version of a warrior of the game.
This Python class implements the defensive version of a warrior of the game.
Syntax
------
    obj = DefensiveWarrior(id, warrior_type, weapon_type, health_points,
attack_rating, defense_rating,
special_defense_rating)
Parameters
----------
[in] id Id of warrior.
[in] warrior_type Type of warrior.
[in] weapon_type Type of weapon that carries out the warrior.
[in] health_points Points of health that the warrior has.
[in] attack_rating Attack rating of the warrior.
[in] defense_rating Defense rating of the warrior.
[in] special_defense_rating Special Defense rating of the warrior.
Returns
-------
obj Python object output parameter that represents an instance
of the class Warrior.
Attributes
----------
Example
-------
>>> from defensivewarrior import DefensiveWarrior
>>> from warrior_type import WarriorType
>>> from weapon_type import WeaponType
>>> obj_Warrior = DefensiveWarrior(1, WarriorType.BOXER, WeaponType.KICK, 99, 10, 7, 19)
"""
def main():
"""Function main of the module.
The function main of this module is used to test the Class DefensiveWarrior.
Syntax
------
[ ] = main()
Parameters
----------
Null .
Returns
-------
Null .
Example
-------
>>> main()
"""
print("=================================================================.")
print("Test Case 1: Create a Warrior.")
print("=================================================================.")
warrior1 = DefensiveWarrior(1,WarriorType.GLADIATOR, WeaponType.SWORD, 100, 8, 9, 15)
if warrior1.get_warrior_type().name == "GLADIATOR":
print("Test PASS. The parameter warrior_type has been correctly set.")
else:
print("Test FAIL. Check the method __init__().")
if warrior1.get_weapon_type().name == "SWORD":
print("Test PASS. The parameter weapon_type has been correctly set.")
else:
print("Test FAIL. Check the method __init__().")
if warrior1.get_health_points() == 100:
print("Test PASS. The parameter health_points has been correctly set.")
else:
print("Test FAIL. Check the method __init__().")
if warrior1.get_attack_rating() == 8:
print("Test PASS. The parameter attack_rating has been correctly set.")
else:
print("Test FAIL. Check the method __init__().")
if warrior1.get_defense_rating() == 9:
print("Test PASS. The parameter defense_rating has been correctly set.")
else:
print("Test FAIL. Check the method __init__().")
if warrior1.get_special_defense_rating() == 15:
print("Test PASS. The parameter special_defense_rating has been correctly set.")
else:
print("Test FAIL. Check the method __init__().")
print("=================================================================.")
print("Test Case 2: Human-readable format of the object.")
print("=================================================================.")
warrior2 = DefensiveWarrior(2, WarriorType.GLADIATOR, WeaponType.SWORD, 100, 7, 10, 20)
if str(warrior2) == "GLADIATOR with a SWORD":
print("Test PASS. The human-readable format of the object has been implemented correctly.")
else:
print("Test FAIL. Check the method __str__().")
print("=================================================================.")
print("Test Case 3: Warrior alive?¿?.")
print("=================================================================.")
warrior3 = DefensiveWarrior(3, WarriorType.UFC, WeaponType.KICK, 97, 8, 9, 14)
if warrior3.is_alive():
warrior3.fight_defense(200) # With this the warrior should be retired.
if not warrior3.is_alive():
print("Test PASS. The method is_alive() has been implemented correctly.")
else:
print("Test FAIL. Check the method is_alive().")
else:
print("Test FAIL. Check the method is_alive().")
print("=================================================================.")
print("Test Case 4: Check the defense during a Fight.")
print("=================================================================.")
warrior4 = DefensiveWarrior(4, WarriorType.MMA, WeaponType.ELBOW, 93, 9, 6, 14)
warrior4.fight_defense(70)
if (warrior4.get_health_points() == 29) or ((warrior4.get_health_points() == 37)):
print("Test PASS. The method fight_defense() has been implemented correctly.")
else:
print("Test FAIL. Check the method fight_defense().")
print("=================================================================.")
print("Test Case 5: Check the attack during a Fight.")
print("=================================================================.")
warrior5 = DefensiveWarrior(5, WarriorType.BOXER, WeaponType.PUNCH, 99, 10, 7, 18)
warrior6 = DefensiveWarrior(6,WarriorType.BOXER, WeaponType.PUNCH, 99, 9, 8, 17)
warrior_hit = warrior5.fight_attack(warrior6)
if warrior_hit:
if warrior6.get_health_points() == 97:
print("Test PASS. The method fight_attack() has been implemented correctly.")
else:
print("Test FAIL. Check the method fight_attack().")
else:
if warrior6.get_health_points() == 99:
print("Test PASS. The method fight_attack() has been implemented correctly.")
else:
print("Test FAIL. Check the method fight_attack().")
# Checking whether this module is executed just itself alone.
if __name__ == "__main__":
main()
# EOF
|
py | 1a3905a3e4ab49d2c0cddf94fa4fcb1ecac94c32 | """
营销活动相关的路由
"""
from django.urls import path
urlpatterns = [
]
|
py | 1a3905de1d34d30a485d8fdf64c2d0bf4bab5db2 | import torch
from torch.autograd import gradcheck
import kornia.geometry.epipolar as epi
import kornia.testing as utils
class TestSymmetricalEpipolarDistance:
def test_smoke(self, device, dtype):
pts1 = torch.rand(1, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(1, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.symmetrical_epipolar_distance(pts1, pts2, Fm).shape == (1, 4)
def test_batch(self, device, dtype):
batch_size = 5
pts1 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.symmetrical_epipolar_distance(pts1, pts2, Fm).shape == (5, 4)
def test_gradcheck(self, device):
# generate input data
batch_size, num_points, num_dims = 2, 3, 2
points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)
points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)
Fm = utils.create_random_fundamental_matrix(batch_size).type_as(points2)
assert gradcheck(epi.symmetrical_epipolar_distance, (points1, points2, Fm), raise_exception=True)
class TestSampsonEpipolarDistance:
def test_smoke(self, device, dtype):
pts1 = torch.rand(1, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(1, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.sampson_epipolar_distance(pts1, pts2, Fm).shape == (1, 4)
def test_batch(self, device, dtype):
batch_size = 5
pts1 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.sampson_epipolar_distance(pts1, pts2, Fm).shape == (5, 4)
def test_gradcheck(self, device):
# generate input data
batch_size, num_points, num_dims = 2, 3, 2
points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)
points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)
Fm = utils.create_random_fundamental_matrix(batch_size).type_as(points2)
assert gradcheck(epi.sampson_epipolar_distance, (points1, points2, Fm), raise_exception=True)
|
py | 1a3905ee460d4975bb7088099be02d9ecfc7d4ba | import mimetypes
from datetime import timedelta, datetime
from enum import Enum
from http.cookiejar import CookieJar
from time import sleep
from typing import List, Optional, AnyStr, TypeVar, TextIO, Tuple, Callable, Dict
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
from loguru import logger
from requests import Response, Session, Request, PreparedRequest, codes
# noinspection PyUnresolvedReferences,PyProtectedMember
from requests._internal_utils import to_native_string
from requests.adapters import BaseAdapter
from requests.cookies import extract_cookies_to_jar, merge_cookies, cookiejar_from_dict
from requests.exceptions import ChunkedEncodingError, ContentDecodingError, \
TooManyRedirects, RequestException
from requests.sessions import merge_setting, merge_hooks
from requests.structures import CaseInsensitiveDict
from requests.utils import requote_uri, rewind_body, get_netrc_auth
from urllib3.util.url import parse_url, Url
from spoofbot.adapter import FileCache
from spoofbot.operating_system import Windows, WindowsVersion, MacOSX, MacOSXVersion, Linux, LinuxDerivatives, \
random_os, OS
from spoofbot.tag import MimeTypeTag, LanguageTag
from spoofbot.util import ReferrerPolicy, are_same_origin, are_same_site, sort_dict, \
TimelessRequestsCookieJar, random_version, get_firefox_versions, get_chrome_versions
from spoofbot.util.log import log_request, log_response
from numpy.random import choice, poisson
class Destination(Enum):
AUDIO = "audio"
AUDIO_WORKLET = "audioworklet"
DOCUMENT = "document"
EMBED = "embed"
EMPTY = "empty"
FONT = "font"
IMAGE = "image"
MANIFEST = "manifest"
OBJECT = "object"
PAINT_WORKLET = "paintworklet"
REPORT = "report"
SCRIPT = "script"
SERVICE_WORKER = "serviceworker"
SHARED_WORKER = "sharedworker"
STYLE = "style"
TRACK = "track"
VIDEO = "video"
WORKER = "worker"
XSLT = "xslt"
NESTED_DOCUMENT = "nested-document"
class Mode(Enum):
CORS = "cors"
NAVIGATE = "navigate"
NESTED_NAVIGATE = "nested-navigate"
NO_CORS = "no-cors"
SAME_ORIGIN = "same-origin"
WEBSOCKET = "websocket"
class Site(Enum):
CROSS_SITE = "cross-site"
SAME_ORIGIN = "same-origin"
SAME_SITE = "same-site"
NONE = "none"
class User(Enum):
USER_ACTIVATED = "?1"
AUTOMATIC = None
DictOrBytes = TypeVar('DictOrBytes', dict, bytes)
DictOrTupleListOrBytesOrFileLike = TypeVar('DictOrTupleListOrBytesOrFileLike', dict,
List[tuple], bytes, TextIO)
DictOrCookieJar = TypeVar('DictOrCookieJar', dict, CookieJar)
StrOrFileLike = TypeVar('StrOrFileLike', str, TextIO)
AuthTupleOrCallable = TypeVar('AuthTupleOrCallable', Tuple[str, str], Callable)
FloatOrTuple = TypeVar('FloatOrTuple', float, Tuple[float, float])
StrOrBool = TypeVar('StrOrBool', str, bool)
StrOrStrTuple = TypeVar('StrOrStrTuple', str, Tuple[str, str])
class Browser(Session):
"""Basic browser session
Specific browsers must inherit from this class and overwrite the abstract methods
"""
_user_agent: str
_accept: List[MimeTypeTag]
_accept_language: List[LanguageTag]
_accept_encoding: List[str]
_dnt: bool
_upgrade_insecure_requests: bool
_te: str
_connection: str
_last_response: Response
_last_navigate: Response
_last_request_timestamp: datetime
_request_timeout: timedelta
_honor_timeout: bool
_waiting_period: timedelta
_did_wait: bool
_header_precedence: list
_referrer_policy: ReferrerPolicy
_adapter: BaseAdapter
def __init__(self):
super(Browser, self).__init__()
self._name = 'Spoofbot'
from spoofbot import __version__
self._version = __version__
self._user_agent = ''
self._accept = []
self._accept_language = []
self._accept_encoding = []
self._dnt = False
self._upgrade_insecure_requests = False
self._te = 'Trailers'
self._connection = 'keep-alive'
# noinspection PyTypeChecker
self._last_response = None
# noinspection PyTypeChecker
self._last_navigate = None
self._last_request_timestamp = datetime(1, 1, 1)
self._request_timeout = timedelta(seconds=1.0)
self._honor_timeout = True
self._waiting_period = timedelta(seconds=0.0)
self._did_wait = False
self._header_precedence = []
self._referrer_policy = ReferrerPolicy.NO_REFERRER_WHEN_DOWNGRADE
# noinspection PyTypeChecker
self._adapter = self.get_adapter('https://')
@property
def name(self) -> str:
"""Name of the browser"""
return self._name
@property
def version(self) -> str:
"""Version of the browser"""
return self._version
@property
def adapter(self) -> BaseAdapter:
"""Gets the adapter for the HTTP/HTTPS requests
:return: The mounted adapter
:rtype: BaseAdapter
"""
return self._adapter
@adapter.setter
def adapter(self, adapter: BaseAdapter):
"""Sets the adapter for the HTTP/HTTPS requests
:param adapter: The adapter to be mounted
:type adapter: BaseAdapter
"""
self._adapter = adapter
self.mount('https://', adapter)
# noinspection HttpUrlsUsage
self.mount('http://', adapter)
@property
def user_agent(self) -> str:
return self._user_agent
@user_agent.setter
def user_agent(self, value: str):
self._user_agent = value
@property
def accept(self) -> List[MimeTypeTag]:
return self._accept
@accept.setter
def accept(self, value: List[MimeTypeTag]):
self._accept = value
@property
def accept_language(self) -> List[LanguageTag]:
return self._accept_language
@accept_language.setter
def accept_language(self, value: List[LanguageTag]):
self._accept_language = value
@property
def accept_encoding(self) -> List[str]:
return self._accept_encoding
@accept_encoding.setter
def accept_encoding(self, value: List[str]):
self._accept_encoding = value
@property
def do_not_track(self) -> bool:
return self._dnt
@do_not_track.setter
def do_not_track(self, value: bool):
self._dnt = value
@property
def upgrade_insecure_requests(self) -> bool:
return self._upgrade_insecure_requests
@upgrade_insecure_requests.setter
def upgrade_insecure_requests(self, value: bool):
self._upgrade_insecure_requests = value
@property
def transfer_encoding(self) -> str:
return self._te
@transfer_encoding.setter
def transfer_encoding(self, value: str):
self._te = value
@property
def connection(self) -> str:
return self._connection
@connection.setter
def connection(self, value: str):
self._connection = value
@property
def origin(self) -> Optional[Url]:
if self._last_response is None:
return None
last_url = parse_url(self._last_response.url)
return Url(last_url.scheme, host=last_url.host)
@property
def last_response(self) -> Optional[Response]:
return self._last_response
@property
def last_navigate(self) -> Optional[Response]:
return self._last_navigate
@property
def last_request_timestamp(self) -> datetime:
return self._last_request_timestamp
@last_request_timestamp.setter
def last_request_timestamp(self, value: datetime):
self._last_request_timestamp = value
@property
def request_timeout(self) -> timedelta:
return self._request_timeout
@request_timeout.setter
def request_timeout(self, value: timedelta):
self._request_timeout = value
@property
def honor_timeout(self) -> bool:
return self._honor_timeout
@honor_timeout.setter
def honor_timeout(self, value: bool):
self._honor_timeout = value
@property
def waiting_period(self) -> timedelta:
return self._waiting_period
@property
def did_wait(self) -> bool:
return self._did_wait
@property
def header_precedence(self) -> list:
return self._header_precedence
@header_precedence.setter
def header_precedence(self, value: list):
self._header_precedence = value
@staticmethod
def create_user_agent(**kwargs) -> str:
"""Creates a user agent string according to the browsers identity.
:param kwargs: Specific arguments to take into account.
:returns: A custom user agent string.
:rtype: str
"""
raise NotImplementedError
@staticmethod
def create_random_user_agent() -> str:
"""Creates seemingly random user agent string
:returns: A random user agent string.
:rtype: str
"""
raise NotImplementedError
# noinspection DuplicatedCode
def _get_referer(self, url: Url) -> Optional[str]:
if self._last_navigate is None:
return None
nav_url = parse_url(self._last_navigate.url)
return self._referrer_policy.get_referrer(nav_url, url)
def _get_origin(self, method: str, url: Url) -> Optional[str]:
if self._last_navigate is None:
return None
nav_url = parse_url(self._last_navigate.url)
if not are_same_origin(nav_url, url) or method not in ['GET', 'HEAD']:
return self._referrer_policy.get_origin(nav_url, url)
@staticmethod
def _get_host(url: Url) -> str:
if url.port:
return f"{url.hostname}:{url.port}"
return url.hostname
def _get_user_agent(self) -> str:
return self._user_agent
def _get_accept(self, url: Url) -> str:
mime_type, _ = mimetypes.guess_type(url.path if url.path is not None else '')
if mime_type is not None:
return mime_type
return ','.join(map(str, self._accept))
def _get_accept_language(self) -> str:
return ','.join(map(str, self._accept_language))
def _get_accept_encoding(self, url: Url) -> str:
_, enc = mimetypes.guess_type(url.path if url.path is not None else '')
if enc is not None:
return enc
encodings = self._accept_encoding.copy()
if url.scheme != 'https' and 'br' in encodings:
encodings.remove('br')
return ', '.join(encodings)
def _get_connection(self) -> Optional[str]:
if self._connection != '':
return self._connection
def _get_dnt(self) -> Optional[str]:
if self._dnt:
return '1'
def _get_upgrade_insecure_requests(self) -> Optional[str]:
if self._upgrade_insecure_requests:
return '1'
def _get_te(self, url: Url) -> Optional[str]:
if url.scheme == 'https' and self._te != '':
return self._te
@staticmethod
def _get_sec_fetch_dest(dest: Destination) -> str:
# https://www.w3.org/TR/fetch-metadata/#sec-fetch-dest-header
# noinspection SpellCheckingInspection
if dest is None:
dest = Destination.EMPTY
# noinspection PyTypeChecker
return dest.value
def _get_sec_fetch_mode(self, method: str, url: Url) -> str:
# https://www.w3.org/TR/fetch-metadata/#sec-fetch-mode-header
mode = Mode.NO_CORS
if self._last_navigate is None:
mode = Mode.NAVIGATE
# noinspection PyTypeChecker
return mode.value
nav_url = parse_url(self._last_navigate.url)
if are_same_origin(url, nav_url):
mode = Mode.SAME_ORIGIN
if self._get_origin(method, url) is not None:
mode = Mode.CORS
# noinspection PyTypeChecker
return mode.value
def _get_sec_fetch_site(self, url: Url) -> str:
# https://www.w3.org/TR/fetch-metadata/#sec-fetch-site-header
site = Site.SAME_ORIGIN
if self._last_navigate is None:
site = Site.NONE
# noinspection PyTypeChecker
return site.value
nav_url = parse_url(self._last_navigate.url)
if not are_same_origin(url, nav_url):
site = Site.CROSS_SITE
if not are_same_site(url, nav_url):
site = Site.SAME_SITE
# noinspection PyTypeChecker
return site.value
def navigate(self, url: str, **kwargs) -> list[Response]:
"""Sends a GET request to the url and sets it into the Referer header in
subsequent requests
:param url: The url the browser is supposed to connect to
:param kwargs: Additional arguments to forward to the requests module
        :returns: The response to the navigation request followed by the
            responses of any fetched attachments (the navigated page is first)
        :rtype: list[Response]
"""
kwargs.setdefault('user_activation', True)
response = self.get(url, **kwargs)
self._last_navigate = response
return self._request_attachments(response)
def _request_attachments(self, response: Response) -> list[Response]:
response.raise_for_status()
responses = [response]
bs = BeautifulSoup(response.content, features='html.parser')
url = parse_url(response.url)
links = self._gather_valid_links(bs, url) + \
self._gather_valid_scripts(bs, url) + \
self._gather_valid_imgs(bs, url)
for link in links:
logger.debug(f"Fetching {link.url}")
try:
resp = self.get(link.url)
responses.append(resp)
except RequestException:
pass
return responses
@staticmethod
def _gather_valid_links(bs: BeautifulSoup, origin: Url) -> list[Url]:
links = []
for link in bs.find_all('link'):
ignore = False
            for rel in link.get('rel', []):  # tolerate <link> tags without a rel attribute
# https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/rel
if rel in {'dns-prefetch', 'preconnect'}:
# DNS resolve and preemptively connecting is useless to us
ignore |= True
elif rel in {'canonical'}:
ignore |= True
elif rel in {'manifest', 'mask-icon'}:
# non-standard and unsupported
ignore |= True
if ignore:
continue
href: str = link.get('href', None)
if href is None:
continue
if href.startswith('/'):
href = f"{origin.scheme}://{origin.hostname}{href}"
links.append(parse_url(href))
return links
@staticmethod
def _gather_valid_scripts(bs: BeautifulSoup, origin: Url) -> list[Url]:
scripts = []
for script in bs.find_all('script'):
if 'src' not in script.attrs:
continue
src: str = script.get('src', None)
if src is None:
continue
if src.startswith('/'):
src = f"{origin.scheme}://{origin.hostname}{src}"
scripts.append(parse_url(src))
return scripts
@staticmethod
def _gather_valid_imgs(bs: BeautifulSoup, origin: Url) -> list[Url]:
scripts = []
for script in bs.find_all('img'):
if 'src' not in script.attrs:
continue
src: str = script.get('src', None)
if src is None:
continue
if src.startswith('/'):
src = f"{origin.scheme}://{origin.hostname}{src}"
scripts.append(parse_url(src))
return scripts
def request(self, method: AnyStr, url: AnyStr, params: DictOrBytes = None,
data: DictOrTupleListOrBytesOrFileLike = None, headers: dict = None,
cookies: DictOrCookieJar = None,
files: StrOrFileLike = None, auth: AuthTupleOrCallable = None,
timeout: FloatOrTuple = None,
allow_redirects=True, proxies: Dict[str, str] = None,
hooks: Dict[str, Callable] = None,
stream: bool = None, verify: StrOrBool = None,
cert: StrOrStrTuple = None,
json: str = None, user_activation: bool = False) -> Response:
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param user_activation: (optional) Indicates that the request was user
initiated.
:param hooks: (optional) Dictionary mapping a hook (only 'request' is
possible) to a Callable.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we
verify the server's TLS certificate, or a string, in which case it must
be a path to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
cookies = cookies if cookies is not None else self.cookies
self.headers = self._get_default_headers(method, parse_url(url),
user_activation)
self.headers.update(headers if headers else {})
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=self.headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
log_request(prep)
prep.headers = CaseInsensitiveDict(
sort_dict(dict(prep.headers), self._header_precedence))
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Await the request timeout
self.await_timeout(parse_url(prep.url))
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
req_timestamp = datetime.now()
response = self.send(prep, **send_kwargs)
adapter = self.adapter
if isinstance(adapter, FileCache) and not adapter.hit:
self._last_request_timestamp = req_timestamp
self._last_response = response
log_response(response)
return response
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
cookie_jar = self.cookies.__new__(self.cookies.__class__)
cookie_jar.__init__()
if isinstance(cookie_jar, TimelessRequestsCookieJar) and isinstance(
self.cookies, TimelessRequestsCookieJar):
cookie_jar.mock_date = self.cookies.mock_date
merged_cookies = merge_cookies(
merge_cookies(cookie_jar, self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers,
dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
p.headers = CaseInsensitiveDict(
sort_dict(dict(p.headers), self._header_precedence))
return p
# noinspection PyUnresolvedReferences
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False,
**adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
previous_fragment = urlparse(req.url).fragment
while url:
log_response(resp)
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
# noinspection PyStatementEffect
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects,
response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
# noinspection SpellCheckingInspection
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
if parsed.fragment == '' and previous_fragment:
# noinspection PyProtectedMember
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (
codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
parsed_url = parse_url(url)
headers = dict(prepared_request.headers)
if 'Accept-Encoding' in headers:
headers['Accept-Encoding'] = self._get_accept_encoding(parsed_url)
te = self._get_te(parsed_url)
if 'TE' in headers and te is None:
del headers['TE']
elif te is not None:
headers['TE'] = te
uir = self._get_upgrade_insecure_requests()
if 'Upgrade-Insecure-Requests' in headers and uir is None:
del headers['Upgrade-Insecure-Requests']
elif uir is not None:
headers['Upgrade-Insecure-Requests'] = uir
if 'Host' in headers:
headers['Host'] = parsed_url.hostname
origin = self._get_origin(prepared_request.method, parsed_url)
if 'Origin' in headers and origin is None:
del headers['Origin']
elif origin is not None:
headers['Origin'] = origin
try:
del headers['Cookie']
except KeyError:
pass
prepared_request.headers = headers
self._adapt_redirection(prepared_request)
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
# noinspection PyProtectedMember
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
# noinspection PyProtectedMember
merge_cookies(prepared_request._cookies, self.cookies)
# noinspection PyProtectedMember
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
# noinspection PyProtectedMember
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
prepared_request.headers = dict(
sort_dict(prepared_request.headers, self._header_precedence))
req = prepared_request
log_request(req)
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
self._last_navigate = resp
yield resp
def _get_default_headers(self, method: str, url: Url,
user_activation: bool) -> CaseInsensitiveDict:
"""Provides the default headers the browser should send when connecting to an
endpoint
The method tries to guess the mimetype and encoding to fill the Accept and
Accept-Encoding headers
:param method: The method of the HTTP request
:param url: The url the browser is supposed to connect to
:returns: A dictionary form of the default headers.
:rtype: OrderedHeaders
"""
return CaseInsensitiveDict(dict(filter(lambda kvp: kvp[1] != '', {
'Host': self._get_host(url),
'User-Agent': self._get_user_agent(),
'Accept': self._get_accept(url),
'Accept-Language': self._get_accept_language(),
'Accept-Encoding': self._get_accept_encoding(url),
'Connection': self._get_connection(),
'Origin': self._get_origin(method, url),
'Referer': self._get_referer(url),
'DNT': self._get_dnt(),
'Upgrade-Insecure-Requests': self._get_upgrade_insecure_requests(),
'TE': self._get_te(url),
}.items())))
def await_timeout(self, url: Url = None):
"""Waits until the request timeout expires.
The delay will be omitted if the last request was a hit in the cache.
Gets called automatically on every request.
"""
if not self._honor_timeout:
return
time_passed = datetime.now() - self._last_request_timestamp
if time_passed < self._request_timeout:
adapter = self.adapter
if url is not None and isinstance(adapter, FileCache) and adapter.is_hit(
url) and adapter.is_active:
logger.debug("Request will be a hit in cache. No need to wait.")
return
time_to_wait = self._request_timeout - time_passed
logger.debug(f"Waiting for {time_to_wait.total_seconds()} seconds.")
sleep(time_to_wait.total_seconds())
self._did_wait = True
return
self._did_wait = False
def _adapt_redirection(self, request: PreparedRequest):
pass
FF_NEWEST = (90, 0)
class Firefox(Browser):
def __init__(self,
os=Windows(),
ff_version=FF_NEWEST,
build_id=20100101,
do_not_track=False,
upgrade_insecure_requests=True):
super(Firefox, self).__init__()
self._name = 'Firefox'
self._version = '.'.join(map(str, ff_version))
self._user_agent = self.create_user_agent(os, ff_version, build_id)
self._accept = [
MimeTypeTag("text", "html"),
MimeTypeTag("application", "xhtml+xml"),
MimeTypeTag("application", "xml", q=0.9),
MimeTypeTag("image", "webp"),
MimeTypeTag("*", "*", q=0.8)
]
self._accept_language = [
LanguageTag("en", "US"),
LanguageTag("en", q=0.5)
]
self._accept_encoding = ['gzip', 'deflate', 'br']
self._dnt = do_not_track
self._upgrade_insecure_requests = upgrade_insecure_requests
self._connection = 'keep-alive'
self._header_precedence = [
'Host',
'User-Agent',
'Accept',
'Accept-Language',
'Accept-Encoding',
'DNT',
'Content-Type',
'Content-Length',
'Origin',
'Connection',
'Referer',
'Cookie',
'Upgrade-Insecure-Requests',
'TE',
]
@staticmethod
def create_user_agent(os: OS = Windows(), version: tuple[int, ...] = FF_NEWEST, build_id: int = 20100101) -> str:
"""Creates a user agent string for Firefox
:param os: The underlying operating system (default :py:class:`Windows`).
        :param version: The version of Firefox (default ``FF_NEWEST``).
:param build_id: The build id of Gecko (default 20100101).
:returns: A custom user agent string.
"""
ff_version = '.'.join(map(str, version))
return f"Mozilla/5.0 ({os}; rv:{ff_version}) " \
f"Gecko/{build_id} " \
f"Firefox/{ff_version}"
@staticmethod
def create_random_user_agent() -> str:
os = random_os()
ff_version = random_version(get_firefox_versions())
return Firefox.create_user_agent(os, ff_version)
CHROME_NEWEST = (92, 0, 4495, 0)
WEBKIT_NEWEST = (537, 36)
class Chrome(Browser):
def __init__(self,
os=Windows(),
chrome_version=CHROME_NEWEST,
webkit_version=WEBKIT_NEWEST,
do_not_track=False,
upgrade_insecure_requests=True):
super(Chrome, self).__init__()
self._name = 'Chrome'
self._version = '.'.join(map(str, chrome_version))
self._user_agent = self.create_user_agent(os=os, version=chrome_version,
webkit_version=webkit_version)
self._accept = [
MimeTypeTag("text", "html"),
MimeTypeTag("application", "xhtml+xml"),
MimeTypeTag("application", "xml", q=0.9),
MimeTypeTag("image", "webp"),
MimeTypeTag("image", "apng"),
MimeTypeTag(q=0.8),
MimeTypeTag("application", "signed-exchange", v='b3', q=0.9),
]
self._accept_language = [
LanguageTag("en", "US"),
LanguageTag("en", q=0.9)
]
self._accept_encoding = ['gzip', 'deflate', 'br']
self._dnt = do_not_track
self._upgrade_insecure_requests = upgrade_insecure_requests
self._connection = 'keep-alive'
self._header_precedence = [
'Host',
'Connection',
'Content-Type',
# 'Content-Length',
'Upgrade-Insecure-Requests',
'User-Agent',
'Sec-Fetch-User',
'Accept',
'Origin',
'Sec-Fetch-Site',
'Sec-Fetch-Mode',
'Referer',
'Accept-Encoding',
'Accept-Language',
# 'DNT',
# 'Cookie',
# 'TE',
]
@staticmethod
def create_user_agent(os=Windows(), version=CHROME_NEWEST,
webkit_version=WEBKIT_NEWEST) -> str:
"""Creates a user agent string for Firefox
:param os: The underlying operating system (default :py:class:`Windows`).
:param version: The version of the underlying webkit
(default `(79, 0, 3945, 88)).
:param webkit_version: The version of Chrome (default: (537, 36)).
:returns: A custom user agent string.
"""
webkit_ver = '.'.join(map(str, webkit_version))
return f"Mozilla/5.0 ({os}) " \
f"AppleWebKit/{webkit_ver} (KHTML, like Gecko) " \
f"Chrome/{'.'.join(map(str, version))} " \
f"Safari/{webkit_ver}"
@staticmethod
def create_random_user_agent() -> str:
os = random_os()
chrome_version = random_version(get_chrome_versions())
        return Chrome.create_user_agent(os, chrome_version)
    def navigate(self, url: str, **kwargs) -> list[Response]:
if parse_url(url).scheme == 'https':
kwargs.setdefault('headers', {}).setdefault('Sec-Fetch-User', '?1')
kwargs.setdefault('headers', {}).setdefault('Sec-Fetch-Mode', 'navigate')
responses = super(Chrome, self).navigate(url, **kwargs)
self._last_navigate = responses[0]
return responses
def _get_default_headers(self, method: str, url: Url,
user_activation: bool) -> CaseInsensitiveDict:
adjust_accept_encoding = self._last_navigate is None
if adjust_accept_encoding:
self._accept_encoding = ['gzip', 'deflate']
headers = super(Chrome, self)._get_default_headers(method, url, user_activation)
if adjust_accept_encoding:
self._accept_encoding = ['gzip', 'deflate', 'br']
if url.scheme == 'https':
headers['Sec-Fetch-Site'] = self._get_sec_fetch_site(url)
headers['Sec-Fetch-Mode'] = self._get_sec_fetch_mode(method, url)
return headers
def _adapt_redirection(self, request: PreparedRequest):
url = parse_url(request.url)
if 'Host' in request.headers:
del request.headers['Host']
if 'Connection' in request.headers:
del request.headers['Connection']
if 'Accept-Encoding' in request.headers:
request.headers['Accept-Encoding'] = self._get_accept_encoding(url)
if url.scheme == 'https':
request.headers['Sec-Fetch-Site'] = self._get_sec_fetch_site(url)
if self._last_navigate is None:
request.headers['Sec-Fetch-Mode'] = 'navigate'
else:
request.headers['Sec-Fetch-Mode'] = self._get_sec_fetch_mode(
request.method, url)
request.headers = CaseInsensitiveDict(
sort_dict(dict(request.headers), self._header_precedence))
|
py | 1a3906101aad6a3d52b1f9e92bdf176d18aac03e | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Affine(nn.Module):
def __init__(self,
min_rot, max_rot,
min_shear_x, max_shear_x,
min_shear_y, max_shear_y,
min_scale_x, max_scale_x,
min_scale_y, max_scale_y
):
super(Affine, self).__init__()
self.min_rot = min_rot
self.max_rot = max_rot
self.min_shear_x = min_shear_x
self.max_shear_x = max_shear_x
self.min_shear_y = min_shear_y
self.max_shear_y = max_shear_y
self.min_scale_x = min_scale_x
self.max_scale_x = max_scale_x
self.min_scale_y = min_scale_y
self.max_scale_y = max_scale_y
def forward(self, x):
rot_theta = np.random.uniform(self.min_rot, self.max_rot)
shear_phi_x = np.random.uniform(self.min_shear_x, self.max_shear_x)
shear_psi_y = np.random.uniform(self.min_shear_y, self.max_shear_y)
scale_x = np.random.uniform(self.min_scale_x, self.max_scale_x)
scale_y = np.random.uniform(self.min_scale_y, self.max_scale_y)
rotation_matrix = np.array([[np.cos(rot_theta), np.sin(rot_theta), 0],
[-np.sin(rot_theta), np.cos(rot_theta), 0],
[0, 0, 1]], dtype=np.float32)
shear_matrix = np.array([[1, np.tan(shear_phi_x), 0],
[np.tan(shear_psi_y), 1, 0],
[0, 0, 1]], dtype=np.float32)
scale_matrix = np.array([[scale_x, 0, 0],
[0, scale_y, 0],
[0, 0, 1]], dtype=np.float32)
transformation_matrix = np.dot(np.dot(rotation_matrix, shear_matrix), scale_matrix)[0:2, :]
matrix = torch.FloatTensor(np.stack([transformation_matrix for _ in range(x.size(0))])).cuda()
grid = F.affine_grid(matrix, x.size())
return F.grid_sample(x, grid)
|
py | 1a39062aac73a7b998639485388477d1a0cc0b2d | import argparse
import sys
import os
import subprocess
import shlex
import glob
import irc_bot
def process_arguments(args):
parser = argparse.ArgumentParser(description='configure the irc clients')
parser.add_argument('--txts_path', action='store', help='path to folder with txt files')
parser.add_argument('--bot_script_path', action='store', help='path to the irc_bot.py script', type=str, default='irc_bot.py')
parser.add_argument('--server', action='store', help='the server to connect the bots to', type=str, default='localhost')
parser.add_argument('--max_bots', action='store', help='the maximum number of bots to train', type=int, default=10)
params = vars(parser.parse_args(args))
return params
def start_individual_bot_process(script_path, file_path, server):
python_path = os.popen('which python').read().rstrip()
line = python_path + ' ' + script_path + ' --txt_path ' + file_path + ' --server ' + server
subprocess.Popen(shlex.split(line), shell=False)
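# --- Example invocation (editor's sketch; the script name, folder and server are placeholders) ---
#   python launch_bots.py --txts_path ./corpora --server irc.example.org --max_bots 5
# Each selected .txt file gets its own irc_bot.py process, started with the
# interpreter reported by `which python`.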
if __name__ == '__main__':
params = process_arguments(sys.argv[1:])
txts_path = params['txts_path']
max_bots = params['max_bots']
bot_script_path = params['bot_script_path']
server = params['server']
for file in glob.glob(txts_path + '/*.txt')[:max_bots]:
start_individual_bot_process(script_path=bot_script_path, file_path=file, server=server)
statistic_client = irc_bot.EcoStatistics('HumammadSusej')
statistic_client.connect(server, tls=False)
statistic_client.handle_forever() |
py | 1a3908b1c78d77b0c189d759d43dc51062fdbb64 |
import urllib3, requests, json, time
from getpass import getpass
from requests.auth import HTTPBasicAuth
# Disable certificate warnings (not a best practice)
urllib3.disable_warnings()
print
print
# Request user API Credentials
api_user = raw_input('Enter API user: ')
api_pwd = getpass()
# Load Json template generator script
import del_render_jinja2
data = json.load(open('del_output.json'))
# Replace PRIME-SERVER with a working hostname or IP address
url = 'https://PRIME-SERVER/webacs/api/v2/op/devices/deleteDevices.json'
headers = {"Content-Type": "application/json"}
#Call REST API resource
response = requests.put(url, data=json.dumps(data), auth=HTTPBasicAuth(api_user, api_pwd), headers=headers, verify=False)
# Assuming Prime is up most of the time, any error here produces the same output (most likely an auth issue); customize the handling if needed
try:
# decode response as json
r_json=response.json()
except ValueError:
print "\n----- Error -----\n"
print "---- AUTH FAILED ----"
print "\n--- End of script ---"
exit(0)
# In practice, Prime needs some time to register the delete job
print "\nDeleting IPs from Prime .....please wait "
time.sleep(10)
##Show Prime API response in a cleaner way
job_status = r_json['mgmtResponse']['deleteDeviceResult'][0]['deleteStatuses']['deleteStatus']
print "\n=================================================="
print " Bulk Delete Job Status:"
print "-----------------------------\n"
for item in job_status:
for key, value in item.items():
print str(key)+':'+str(value)
print "\n===================================================\n"
print "\n---------- End of script ----------"
|
py | 1a3908d3d9c3a060f29aa39d2951f65b152098a3 | #
# PySNMP MIB module EdgeSwitch-ROUTING6-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EdgeSwitch-ROUTING6-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:10:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
fastPath, = mibBuilder.importSymbols("EdgeSwitch-REF-MIB", "fastPath")
Ipv6IfIndex, Ipv6IfIndexOrZero, Ipv6AddressIfIdentifier, Ipv6Address, Ipv6AddressPrefix = mibBuilder.importSymbols("IPV6-TC", "Ipv6IfIndex", "Ipv6IfIndexOrZero", "Ipv6AddressIfIdentifier", "Ipv6Address", "Ipv6AddressPrefix")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
IpAddress, Counter64, Integer32, MibIdentifier, NotificationType, TimeTicks, Bits, ObjectIdentity, ModuleIdentity, Counter32, iso, Gauge32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Counter64", "Integer32", "MibIdentifier", "NotificationType", "TimeTicks", "Bits", "ObjectIdentity", "ModuleIdentity", "Counter32", "iso", "Gauge32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TruthValue, PhysAddress, MacAddress, RowStatus, TextualConvention, VariablePointer, DisplayString, RowPointer, TimeStamp = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "PhysAddress", "MacAddress", "RowStatus", "TextualConvention", "VariablePointer", "DisplayString", "RowPointer", "TimeStamp")
fastPathRoutingIpv6 = ModuleIdentity((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30))
fastPathRoutingIpv6.setRevisions(('2011-01-26 00:00', '2007-05-23 00:00', '2005-09-21 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: fastPathRoutingIpv6.setRevisionsDescriptions(('Postal address updated.', 'Ubiquiti branding related changes.', 'Updated for release',))
if mibBuilder.loadTexts: fastPathRoutingIpv6.setLastUpdated('201101260000Z')
if mibBuilder.loadTexts: fastPathRoutingIpv6.setOrganization('Broadcom Inc')
if mibBuilder.loadTexts: fastPathRoutingIpv6.setContactInfo('')
if mibBuilder.loadTexts: fastPathRoutingIpv6.setDescription('The Ubiquiti Private MIB for FastPath Ipv6 Routing')
agentIpv6Group = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1))
agentIpv6RoutingMode = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RoutingMode.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RoutingMode.setDescription('Administratively enables/disables ipv6 unicast routing on the switch.')
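# --- Illustrative query (editor's sketch, not part of the pysmi-generated module) ---
# With pysnmp's high-level API, the agentIpv6RoutingMode scalar defined above could be
# read roughly as follows (the host address and community string are placeholders):
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#   errInd, errStat, errIdx, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#       ObjectType(ObjectIdentity('EdgeSwitch-ROUTING6-MIB', 'agentIpv6RoutingMode', 0))))
#   print(varBinds[0])   # e.g. "... agentIpv6RoutingMode.0 = enable"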
agentIpv6InterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2), )
if mibBuilder.loadTexts: agentIpv6InterfaceTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceTable.setDescription('')
agentIpv6InterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6InterfaceIfIndex"))
if mibBuilder.loadTexts: agentIpv6InterfaceEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceEntry.setDescription('')
agentIpv6InterfaceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6InterfaceIfIndex.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceIfIndex.setDescription('The IfIndex associated with this instance.')
agentIpv6InterfaceMtuValue = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 2), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1280, 1500), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6InterfaceMtuValue.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceMtuValue.setDescription('Configures the MTU value for this interface. If it is not yet configured, retrieving the value of this object results in a zero value. Setting the value zero to this object effectively un-configures the MTU.')
agentIpv6InterfaceDadTransmits = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6InterfaceDadTransmits.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceDadTransmits.setDescription('Configures the dad transmits value for this interface.')
agentIpv6InterfaceLinkLocalOnly = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6InterfaceLinkLocalOnly.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceLinkLocalOnly.setDescription('When enabled, interface is capable of ipv6 operation without a global address. In this case, an eui-64 based link-local address is used. ')
agentIpv6InterfaceIcmpUnreachables = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6InterfaceIcmpUnreachables.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceIcmpUnreachables.setDescription('If this object is enable, it indicates that ICMPv6 unreachables can be sent on this interface.')
agentIpv6InterfaceAutoconfig = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6InterfaceAutoconfig.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceAutoconfig.setDescription('If this object is enabled, it indicates that the IPv6 address is automatically generated using IPv6 stateless address auto configuration.')
agentIpv6InterfaceDhcpClient = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6InterfaceDhcpClient.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceDhcpClient.setDescription('If this object is enabled, the interface uses DHCPv6 Client protocol to acquire an IPv6 address.')
agentIpv6InterfaceIcmpRedirects = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6InterfaceIcmpRedirects.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InterfaceIcmpRedirects.setDescription('Enable/Disable the sending of ICMPv6 redirect messages in the redirect scenario where the forwarded packet is sent out through the same interface on which the packet was received. ')
agentIpv6RouterAdvertisementTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3), )
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementTable.setDescription('There is no global administrative flag for router advertisement. The global routing flag (agentIpv6RoutingMode) will be used for this purpose. If routing is disabled, router advertisement is disabled as well.')
agentIpv6RouterAdvertisementEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6RouterAdvertisementIfIndex"))
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementEntry.setDescription('')
agentIpv6RouterAdvertisementIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementIfIndex.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementIfIndex.setDescription('Interface Number to configure Router Advertisement on.')
agentIpv6RouterAdvertisementSuppressMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementSuppressMode.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementSuppressMode.setDescription('Enable or disable router advertisement suppression on the interface.')
agentIpv6RouterAdvertisementMaxAdvertisementInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 1800)).clone(600)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementMaxAdvertisementInterval.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementMaxAdvertisementInterval.setDescription('Maximum time allowed between sending router advertisements from the interface.')
agentIpv6RouterAdvertisementAdvertisementLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 65520)).clone(1800)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementAdvertisementLifetime.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementAdvertisementLifetime.setDescription('Value of lifetime field of router advertisement sent from the interface. This value must be greater than or equal to agentIpv6RouterAdvertisementMaxAdvertisementInterval.')
agentIpv6RouterAdvertisementNbrSolicitInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3600000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementNbrSolicitInterval.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementNbrSolicitInterval.setDescription('Value of retrans time field of router advertisement sent from the interface. A value of 0 means this router does not specify the interval.')
agentIpv6RouterAdvertisementReachableTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3600000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementReachableTime.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementReachableTime.setDescription('Value of reachable time field of router advertisement sent from the interface. A value of 0 means this router does not specify the interval.')
agentIpv6RouterAdvertisementManagedFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementManagedFlag.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementManagedFlag.setDescription('Value of managed config field of router advertisement sent from the interface.')
agentIpv6RouterAdvertisementOtherFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementOtherFlag.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementOtherFlag.setDescription('Value of other config field of router advertisement sent from the interface.')
agentIpv6RouterAdvertisementHopLimitUnspecifiedMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementHopLimitUnspecifiedMode.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouterAdvertisementHopLimitUnspecifiedMode.setDescription('This object configures the router to send Router Advertisements on an interface with unspecified (0) Current Hop Limit value. This will tell the hosts on that link to ignore the Hop Limit from this Router.')
agentIpv6AddrPrefixTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4), )
if mibBuilder.loadTexts: agentIpv6AddrPrefixTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefixTable.setDescription('The list of IPv6 address prefixes of IPv6 interfaces.')
agentIpv6AddrPrefixEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6InterfaceIfIndex"), (0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6AddrPrefix"), (0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6AddrPrefixLength"))
if mibBuilder.loadTexts: agentIpv6AddrPrefixEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefixEntry.setDescription('An interface entry containing objects of a particular IPv6 address prefix.')
agentIpv6AddrPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4, 1, 1), Ipv6AddressPrefix())
if mibBuilder.loadTexts: agentIpv6AddrPrefix.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefix.setDescription('The prefix associated with the this interface.')
agentIpv6AddrPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setUnits('bits')
if mibBuilder.loadTexts: agentIpv6AddrPrefixLength.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefixLength.setDescription('The length of the prefix (in bits).')
agentIpv6AddrPrefixOnLinkFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6AddrPrefixOnLinkFlag.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefixOnLinkFlag.setDescription("This object has the value 'true(1)', if this prefix can be used for on-link determination and the value 'false(2)' otherwise.")
agentIpv6AddrPrefixAutonomousFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6AddrPrefixAutonomousFlag.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefixAutonomousFlag.setDescription('Autonomous address configuration flag. When true(1), indicates that this prefix can be used for autonomous address configuration (i.e. can be used to form a local interface address). If false(2), it is not used to autoconfigure a local interface address.')
agentIpv6AddrPrefixAdvPreferredLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4, 1, 5), Unsigned32()).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6AddrPrefixAdvPreferredLifetime.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefixAdvPreferredLifetime.setDescription('It is the length of time in seconds that this prefix will remain preferred, i.e. time until deprecation. A value of 4,294,967,295 represents infinity. The address generated from a deprecated prefix should no longer be used as a source address in new communications, but packets received on such an interface are processed as expected.')
agentIpv6AddrPrefixAdvValidLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 4, 1, 6), Unsigned32()).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6AddrPrefixAdvValidLifetime.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPrefixAdvValidLifetime.setDescription('It is the length of time in seconds that this prefix will remain valid, i.e. time until invalidation. A value of 4,294,967,295 represents infinity. The address generated from an invalidated prefix should not appear as the destination or source address of a packet.')
agentIpv6AddrTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 5), )
if mibBuilder.loadTexts: agentIpv6AddrTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrTable.setDescription("The table of addressing information relevant to this node's interface addresses.")
agentIpv6AddrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 5, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6InterfaceIfIndex"), (0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6AddrAddress"))
if mibBuilder.loadTexts: agentIpv6AddrEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrEntry.setDescription("The addressing information for one of this node's interface addresses.")
agentIpv6AddrAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 5, 1, 1), Ipv6Address())
if mibBuilder.loadTexts: agentIpv6AddrAddress.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrAddress.setDescription("The IPv6 address to which this entry's addressing information pertains.")
agentIpv6AddrPfxLength = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setUnits('bits').setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6AddrPfxLength.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrPfxLength.setDescription('The length of the prefix (in bits) associated with the IPv6 address of this entry.')
agentIpv6AddrEui64Flag = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 5, 1, 3), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6AddrEui64Flag.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrEui64Flag.setDescription("This object has the value 'true(1)', if this address uses and eui-64 generated interface identifier and the value 'false(2)' otherwise.")
agentIpv6AddrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 5, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6AddrStatus.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AddrStatus.setDescription('Creates a new entry in the Address table. Allowed values are: createAndGo(4) - Creates an entry in this table, associating the address with a given interface. The agentIpv6NetMask object must be set during creation. destroy(6) - Removes the associated address from the interface.')
agentIpv6StaticRouteTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6), )
if mibBuilder.loadTexts: agentIpv6StaticRouteTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRouteTable.setDescription('IPv6 Static Routing table. This table contains an entry for each valid IPv6 static unicast route that can be used for packet forwarding determination.')
agentIpv6StaticRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6StaticRouteDest"), (0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6StaticRoutePfxLength"), (0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6StaticRouteIfIndex"), (0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6StaticRouteNextHop"))
if mibBuilder.loadTexts: agentIpv6StaticRouteEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRouteEntry.setDescription('A routing entry.')
agentIpv6StaticRouteDest = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6, 1, 1), Ipv6AddressPrefix())
if mibBuilder.loadTexts: agentIpv6StaticRouteDest.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRouteDest.setDescription('The destination IPv6 address of this route. This object may not take a Multicast address value.')
agentIpv6StaticRoutePfxLength = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setUnits('bits')
if mibBuilder.loadTexts: agentIpv6StaticRoutePfxLength.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRoutePfxLength.setDescription('Indicates the prefix length of the destination address.')
agentIpv6StaticRouteIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6, 1, 3), Ipv6IfIndexOrZero())
if mibBuilder.loadTexts: agentIpv6StaticRouteIfIndex.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRouteIfIndex.setDescription('The index value which uniquely identifies the local interface through which the next hop of this route should be reached. The interface identified by a particular value of this index is the same interface as identified by the same value of ipv6IfIndex. For routes with global address next hop this value can be zero.')
agentIpv6StaticRouteNextHop = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6, 1, 4), Ipv6Address())
if mibBuilder.loadTexts: agentIpv6StaticRouteNextHop.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRouteNextHop.setDescription('The address of the next system en route. ')
agentIpv6StaticRoutePreference = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6StaticRoutePreference.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRoutePreference.setDescription('The routing preference metric for this route. A lower value is more preferred.')
agentIpv6StaticRouteStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 6, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6StaticRouteStatus.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRouteStatus.setDescription('Creates a new entry in the Address table. Allowed values are: createAndGo(4) - Creates an entry in this table. destroy(6) - Removes the associated route from the interface.')
agentIpv6ServicePortGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7))
agentIpv6ServicePortPrefixTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 1), )
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixTable.setDescription('IPv6 Service Port Prefix Table. This table contains an entry for each valid IPv6 prefix configured on the Service Port.')
agentIpv6ServicePortPrefixEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 1, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6ServicePortPrefixIndex"))
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixEntry.setDescription('An IPv6 Service Port Prefix entry.')
agentIpv6ServicePortPrefixIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixIndex.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixIndex.setDescription('The index of the Ipv6 Prefix Address configured on the Service Port. Removal of a row will cause index values to be reassigned.')
agentIpv6ServicePortPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 1, 1, 2), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortPrefix.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortPrefix.setDescription('The Ipv6 Prefix Address configured on the Service Port.')
agentIpv6ServicePortPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixLength.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortPrefixLength.setDescription('The length of the IPv6 Prefix Address.')
agentIpv6ServicePortDefaultRouterTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 2), )
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouterTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouterTable.setDescription('IPv6 Service Port Default Router Table. This table contains an entry for each valid IPv6 Default Router configured on the Service Port.')
agentIpv6ServicePortDefaultRouterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 2, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6ServicePortDefaultRouterIndex"))
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouterEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouterEntry.setDescription('An IPv6 Service Port Default Router entry.')
agentIpv6ServicePortDefaultRouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouterIndex.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouterIndex.setDescription('The index of the IPv6 Default Router Address configured on the Service Port. Removal of a row will cause index values to be reassigned.')
agentIpv6ServicePortDefaultRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 2, 1, 2), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouter.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortDefaultRouter.setDescription('The Address of the IPv6 Default Router configured on the Service Port.')
agentIpv6ServicePortNbrTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3), )
if mibBuilder.loadTexts: agentIpv6ServicePortNbrTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrTable.setDescription('IPv6 Service Port Neighbor Table. This table contains an entry for each valid IPv6 Neighbor configured on the Service Port.')
agentIpv6ServicePortNbrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6ServicePortNbrAddr"))
if mibBuilder.loadTexts: agentIpv6ServicePortNbrEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrEntry.setDescription('An IPv6 Service Port Neighbor entry.')
agentIpv6ServicePortNbrAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3, 1, 1), Ipv6Address())
if mibBuilder.loadTexts: agentIpv6ServicePortNbrAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrAddr.setDescription('The Ipv6 Address of a neighbor switch visible to the Service Port.')
agentIpv6ServicePortNbrPhysAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortNbrPhysAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrPhysAddr.setDescription('The MacAddress of the neighboring switch.')
agentIpv6ServicePortNbrState = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6))).clone(namedValues=NamedValues(("reachable", 1), ("stale", 2), ("delay", 3), ("probe", 4), ("unknown", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortNbrState.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrState.setDescription('The state of the neighboring switch: reachable(1) - The neighbor is reachable by this switch. stale(2) - Information about the neighbor is scheduled for deletion. delay(3) - No information has been received from neighbor during delay period. probe(4) - Switch is attempting to probe for this neighbor. unknown(6) - Unknown status.')
agentIpv6ServicePortNbrUpdated = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortNbrUpdated.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrUpdated.setDescription('The last sysUpTime that this neighbor has been updated.')
agentIpv6ServicePortNbrIsRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortNbrIsRouter.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrIsRouter.setDescription('Returns true(1) if the neighbor machine is a router, false(2) otherwise.')
agentIpv6ServicePortNbrType = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("dynamic", 2), ("static", 3), ("local", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ServicePortNbrType.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrType.setDescription("The type of the mapping. The 'dynamic(2)' type indicates that the IPv6 address to physical addresses mapping has been dynamically resolved using the IPv6 Neighbor Discovery protocol. The 'static(3)' type indicates that the mapping has been statically configured. The 'local(4)' type indicates that the mapping is provided for an entity's own interface address.")
agentIpv6ServicePortNbrCfgTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 4), )
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgTable.setDescription('The table contains an entry for each static IPv6 Neighbor on the Service Port.')
agentIpv6ServicePortNbrCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 4, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6ServicePortNbrCfgAddr"))
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgEntry.setDescription('An entry of the static IPv6 Neighbor on the Service Port.')
agentIpv6ServicePortNbrCfgAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 4, 1, 1), Ipv6Address())
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgAddr.setDescription('The Ipv6 Address of a static neighbor on the Service Port.')
agentIpv6ServicePortNbrCfgPhysAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 4, 1, 2), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgPhysAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgPhysAddr.setDescription('The MAC Address of a static neighbor on the Service Port.')
agentIpv6ServicePortNbrCfgEntryStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 7, 4, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgEntryStatus.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ServicePortNbrCfgEntryStatus.setDescription('Create or delete the static neighbor entry on the Service Port. The configured static neighbor entry is always active.')
agentIpv6IcmpControlGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 8))
agentIpv6IcmpRateLimitInterval = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 8, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6IcmpRateLimitInterval.setStatus('current')
if mibBuilder.loadTexts: agentIpv6IcmpRateLimitInterval.setDescription('Specifies the time interval between tokens being placed in the bucket for ICMP Ratelimit.')
agentIpv6IcmpRateLimitBurstSize = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 8, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6IcmpRateLimitBurstSize.setStatus('current')
if mibBuilder.loadTexts: agentIpv6IcmpRateLimitBurstSize.setDescription('Specifies the number of tokens to be placed after timeout.')
agentDhcp6ClientParametersTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9), )
if mibBuilder.loadTexts: agentDhcp6ClientParametersTable.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientParametersTable.setDescription('Information about the DHCPv6 Client parameters on the interfaces.')
agentDhcp6ClientParametersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentDhcp6ClientInterfaceIndex"))
if mibBuilder.loadTexts: agentDhcp6ClientParametersEntry.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientParametersEntry.setDescription('DHCPv6 Client information on an interface.')
agentDhcp6ClientInterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientInterfaceIndex.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientInterfaceIndex.setDescription('The Interface index on which the IPv6 address is leased by the DHCPv6 Server.')
agentDhcp6ClientPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 2), Ipv6AddressPrefix()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientPrefix.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientPrefix.setDescription('The IPv6 Prefix leased by the DHCPv6 Server.')
agentDhcp6ClientPrefixlength = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientPrefixlength.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientPrefixlength.setDescription('The Prefix length corresponding to the IPv6 Prefix leased by the DHCPv6 Server.')
agentDhcp6ClientState = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("inactive", 0), ("solicit", 1), ("request", 2), ("active", 3), ("renew", 4), ("rebind", 5), ("release", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientState.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientState.setDescription('The State of the DHCPv6 Client on this interface.')
agentDhcp6ClientServerDUID = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientServerDUID.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientServerDUID.setDescription('The DHCPv6 Unique Identifier of the DHCPv6 Server on this interface.')
agentDhcp6ClientT1Time = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientT1Time.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientT1Time.setDescription('The T1 (in seconds) time as indicated by the DHCPv6 Server. T1 value indicates the time interval after which the address is requested for renewal.')
agentDhcp6ClientT2Time = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientT2Time.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientT2Time.setDescription('The T2 (in seconds) time as indicated by the DHCPv6 Server. T2 value indicates the time interval after which the Client sends Rebind message to the Server in case there are no replies to the Renew messages.')
agentDhcp6ClientIAID = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientIAID.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientIAID.setDescription('An identifier for an identity association chosen by this Client.')
agentDhcp6ClientPreferredLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientPreferredLifeTime.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientPreferredLifeTime.setDescription('The time (in seconds) that the IPv6 address is leased by the DHCPv6 Server.')
agentDhcp6ClientValidLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientValidLifeTime.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientValidLifeTime.setDescription('The time (in seconds) that the IPv6 address is leased by the DHCPv6 Server.')
agentDhcp6ClientRenewTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientRenewTime.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientRenewTime.setDescription('The time (in seconds) remaining to send a DHCPv6 Renew request to DHCPv6 Server for the leased address.')
agentDhcp6ClientExpireTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 9, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentDhcp6ClientExpireTime.setStatus('current')
if mibBuilder.loadTexts: agentDhcp6ClientExpireTime.setDescription('The time (in seconds) when the DHCPv6 leased address expires.')
agentIpv6RoutingTableSummaryGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10))
agentIpv6ConnectedRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ConnectedRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ConnectedRoutes.setDescription('The number of connected routes in the IPv6 routing table.')
agentIpv6StaticRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6StaticRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6StaticRoutes.setDescription('The number of static routes in the IPv6 routing table.')
agentIpv66to4Routes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv66to4Routes.setStatus('current')
if mibBuilder.loadTexts: agentIpv66to4Routes.setDescription('The number of 6to4 routes in the IPv6 routing table.')
agentIpv6OspfRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6OspfRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6OspfRoutes.setDescription('The number of OSPFv3 routes in the IPv6 routing table.')
agentIpv6OspfIntraRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6OspfIntraRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6OspfIntraRoutes.setDescription('The number of OSPFv3 intra-area routes in the IPv6 routing table.')
agentIpv6OspfInterRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6OspfInterRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6OspfInterRoutes.setDescription('The number of OSPFv3 inter-area routes in the IPv6 routing table.')
agentIpv6OspfExt1Routes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6OspfExt1Routes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6OspfExt1Routes.setDescription('The number of OSPFv3 external type 1 routes in the IPv6 routing table.')
agentIpv6OspfExt2Routes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6OspfExt2Routes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6OspfExt2Routes.setDescription('The number of OSPFv3 external type 2 routes in the IPv6 routing table.')
agentIpv6BgpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6BgpRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6BgpRoutes.setDescription('The number of BGP routes in the IPv6 routing table.')
agentIpv6EbgpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6EbgpRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EbgpRoutes.setDescription('The number of external BGP routes in the IPv6 routing table.')
agentIpv6IbgpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6IbgpRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6IbgpRoutes.setDescription('The number of internal BGP routes in the IPv6 routing table.')
agentIpv6LocalBgpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6LocalBgpRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6LocalBgpRoutes.setDescription('The number of local BGP routes in the IPv6 routing table.')
agentIpv6RejectRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6RejectRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RejectRoutes.setDescription('The number of reject routes in the IPv6 routing table.')
agentIpv6TotalRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6TotalRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6TotalRoutes.setDescription('The number of routes in the IPv6 routing table.')
agentIpv6BestRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6BestRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6BestRoutes.setDescription('The number of IPv6 routes in the forwarding table.')
agentIpv6BestRoutesHigh = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6BestRoutesHigh.setStatus('current')
if mibBuilder.loadTexts: agentIpv6BestRoutesHigh.setDescription('The highest number of IPv6 routes in the forwarding table.')
agentIpv6AlternateRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6AlternateRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6AlternateRoutes.setDescription('The number of alternate routes in the IPv6 routing table. An alternate route is less preferred than the best route and is not used for forwarding.')
agentIpv6RouteAdds = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6RouteAdds.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouteAdds.setDescription('The number of routes added to the IPv6 routing table.')
agentIpv6RouteModifies = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6RouteModifies.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouteModifies.setDescription('The number of routes changed in the IPv6 routing table.')
agentIpv6RouteDeletes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6RouteDeletes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6RouteDeletes.setDescription('The number of routes removed from the IPv6 routing table.')
agentIpv6UnresolvedRouteAdds = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6UnresolvedRouteAdds.setStatus('current')
if mibBuilder.loadTexts: agentIpv6UnresolvedRouteAdds.setDescription("The number of IPv6 route adds that failed because none of the route's next hops were on a local subnet.")
agentIpv6InvalidRouteAdds = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6InvalidRouteAdds.setStatus('current')
if mibBuilder.loadTexts: agentIpv6InvalidRouteAdds.setDescription('The number of IPv6 routes adds that failed because the route was invalid.')
agentIpv6FailedRouteAdds = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6FailedRouteAdds.setStatus('current')
if mibBuilder.loadTexts: agentIpv6FailedRouteAdds.setDescription('The number of IPv6 routes adds that failed because of a resource limitation in the routing table')
agentIpv6ReservedLocals = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 24), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6ReservedLocals.setStatus('current')
if mibBuilder.loadTexts: agentIpv6ReservedLocals.setDescription('The number of IPv6 routing table entries reserved for a local subnet on a routing interface that is down. ')
agentIpv6UniqueNextHops = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 25), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6UniqueNextHops.setStatus('current')
if mibBuilder.loadTexts: agentIpv6UniqueNextHops.setDescription('The number of distinct next hops used among all IPv6 routes currently in the routing table. These include local interfaces for local routes and neighbors for indirect routes.')
agentIpv6UniqueNextHopsHigh = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 26), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6UniqueNextHopsHigh.setStatus('current')
if mibBuilder.loadTexts: agentIpv6UniqueNextHopsHigh.setDescription('The highest count of unique next hops since counters were last cleared.')
agentIpv6NextHopGroups = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 27), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6NextHopGroups.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NextHopGroups.setDescription('The current number of next hop groups in use by one or more routes. Each next hop group includes one or more next hops.')
agentIpv6NextHopGroupsHigh = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 28), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6NextHopGroupsHigh.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NextHopGroupsHigh.setDescription('The highest count of next hop groups since counters were last cleared.')
agentIpv6EcmpGroups = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 29), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6EcmpGroups.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpGroups.setDescription('The number of next hop groups with multiple next hops.')
agentIpv6EcmpGroupsHigh = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 30), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6EcmpGroupsHigh.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpGroupsHigh.setDescription('The high water mark of the number of ECMP groups.')
agentIpv6EcmpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 31), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6EcmpRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpRoutes.setDescription('The current number of IPv6 routes with multiple next hops.')
agentIpv6TruncEcmpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 32), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6TruncEcmpRoutes.setStatus('current')
if mibBuilder.loadTexts: agentIpv6TruncEcmpRoutes.setDescription('The number of ECMP routes that are currently installed in the forwarding table with just one next hop. The forwarding table may limit the number of ECMP routes or the number of ECMP groups. When an ECMP route cannot be installed because such a limit is reached, the route is installed with a single next hop.')
agentIpv6EcmpRetries = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 10, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6EcmpRetries.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpRetries.setDescription('The number of ECMP routes that have been installed in the forwarding table after initially being installed with a single next hop.')
agentIpv6EcmpCountTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 11), )
if mibBuilder.loadTexts: agentIpv6EcmpCountTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpCountTable.setDescription('A count of the number of routes with each number of ECMP next hops. A walk of this table only returns the entries with a non-zero value for agentIpv6EcmpRouteCount.')
agentIpv6EcmpCountEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 11, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6EcmpNextHopCount"))
if mibBuilder.loadTexts: agentIpv6EcmpCountEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpCountEntry.setDescription('Number of IPv6 routes with a given number of next hops.')
agentIpv6EcmpNextHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 11, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 128)))
if mibBuilder.loadTexts: agentIpv6EcmpNextHopCount.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpNextHopCount.setDescription('The number of next hops in a route. From 0 to the maximum number of next hops in an ECMP route. The maximum varies by platform.')
agentIpv6EcmpRouteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 11, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6EcmpRouteCount.setStatus('current')
if mibBuilder.loadTexts: agentIpv6EcmpRouteCount.setDescription('The number of IPv6 routes with agentIpv6EcmpNextHopCount next hops.')
agentIpv6NetworkPortGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12))
agentIpv6NetworkPortNbrTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1), )
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrTable.setDescription('IPv6 Network Port Neighbor Table. This table contains an entry for each valid IPv6 Neighbor configured on the Network Port.')
agentIpv6NetworkPortNbrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6NetworkPortNbrAddr"))
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrEntry.setDescription('An IPv6 Network Port Neighbor entry.')
agentIpv6NetworkPortNbrAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1, 1, 1), Ipv6Address())
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrAddr.setDescription('The Ipv6 Address of a neighbor switch visible to the Network Port.')
agentIpv6NetworkPortNbrPhysAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrPhysAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrPhysAddr.setDescription('The MacAddress of the neighboring switch.')
agentIpv6NetworkPortNbrState = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6))).clone(namedValues=NamedValues(("reachable", 1), ("stale", 2), ("delay", 3), ("probe", 4), ("unknown", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrState.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrState.setDescription('The state of the neighboring switch: reachable(1) - The neighbor is reachable by this switch. stale(2) - Information about the neighbor is scheduled for deletion. delay(3) - No information has been received from neighbor during delay period. probe(4) - Switch is attempting to probe for this neighbor. unknown(6) - Unknown status.')
agentIpv6NetworkPortNbrUpdated = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrUpdated.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrUpdated.setDescription('The last sysUpTime that this neighbor has been updated.')
agentIpv6NetworkPortNbrIsRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrIsRouter.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrIsRouter.setDescription('Returns true(1) if the neighbor machine is a router, false(2) otherwise.')
agentIpv6NetworkPortNbrType = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("dynamic", 2), ("static", 3), ("local", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrType.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrType.setDescription("The type of the mapping. The 'dynamic(2)' type indicates that the IPv6 address to physical addresses mapping has been dynamically resolved using the IPv6 Neighbor Discovery protocol. The 'static(3)' type indicates that the mapping has been statically configured. The 'local(4)' type indicates that the mapping is provided for an entity's own interface address.")
agentIpv6NetworkPortNbrCfgTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 2), )
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgTable.setDescription('The table contains an entry for each static IPv6 Neighbor on the Network Port.')
agentIpv6NetworkPortNbrCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 2, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6NetworkPortNbrCfgAddr"))
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgEntry.setDescription('An entry of the static IPv6 Neighbor on the Network Port.')
agentIpv6NetworkPortNbrCfgAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 2, 1, 1), Ipv6Address())
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgAddr.setDescription('The Ipv6 Address of a static neighbor on the Network Port.')
agentIpv6NetworkPortNbrCfgPhysAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 2, 1, 2), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgPhysAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgPhysAddr.setDescription('The MAC Address of a static neighbor on the Network Port.')
agentIpv6NetworkPortNbrCfgEntryStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 12, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgEntryStatus.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NetworkPortNbrCfgEntryStatus.setDescription('Create or delete the static neighbor entry on the Network Port. The configured static neighbor entry is always active.')
agentIpv6NbrCfgTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 13), )
if mibBuilder.loadTexts: agentIpv6NbrCfgTable.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NbrCfgTable.setDescription('The table contains an entry for each static IPv6 Neighbor on the Network Port.')
agentIpv6NbrCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 13, 1), ).setIndexNames((0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6IfIndex"), (0, "EdgeSwitch-ROUTING6-MIB", "agentIpv6NbrCfgAddr"))
if mibBuilder.loadTexts: agentIpv6NbrCfgEntry.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NbrCfgEntry.setDescription('An entry of the static IPv6 Neighbor on the Network Port.')
agentIpv6IfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 13, 1, 1), Ipv6IfIndex())
if mibBuilder.loadTexts: agentIpv6IfIndex.setStatus('current')
if mibBuilder.loadTexts: agentIpv6IfIndex.setDescription('A unique non-zero value identifying the particular IPv6 interface.')
agentIpv6NbrCfgAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 13, 1, 2), Ipv6Address())
if mibBuilder.loadTexts: agentIpv6NbrCfgAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NbrCfgAddr.setDescription('The Ipv6 Address of a static neighbor on the Routing or Host interface.')
agentIpv6NbrCfgPhysAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 13, 1, 3), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6NbrCfgPhysAddr.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NbrCfgPhysAddr.setDescription('The MAC Address of a static neighbor on the Routing or Host interface.')
agentIpv6NbrCfgEntryStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 13, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentIpv6NbrCfgEntryStatus.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NbrCfgEntryStatus.setDescription('Create or delete the static neighbor entry on the Routing or Host interface. The configured static neighbor entry is always active.')
agentIpv6NeighborsDynamicRenew = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6NeighborsDynamicRenew.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NeighborsDynamicRenew.setDescription('Enables/disables the periodic NUD (neighbor unreachability detection) to be run on the existing IPv6 neighbor entries based on the activity of the entries in the hardware. If the setting is disabled, only those entries that are actively used in the hardware are triggered for NUD. If the setting is enabled, all the entries are triggered for NUD irrespective of their usage in the hardware.')
agentIpv6UnresolvedDataRateLimit = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1024)).clone(1024)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6UnresolvedDataRateLimit.setStatus('current')
if mibBuilder.loadTexts: agentIpv6UnresolvedDataRateLimit.setDescription('The rate in packets-per-second for the number of IPv6 data packets trapped to CPU when the packet fails to be forwarded in the hardware due to unresolved hardware address of the destined IPv6 node.')
agentIpv6NUDMaxUnicastSolicits = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 10)).clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6NUDMaxUnicastSolicits.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NUDMaxUnicastSolicits.setDescription('The maximum number of unicast Neighbor Solicitations sent during neighbor resolution or during NUD (neighbor unreachability detection) before switching to multicast Neighbor Solicitations.')
agentIpv6NUDMaxMulticastSolicits = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 255)).clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6NUDMaxMulticastSolicits.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NUDMaxMulticastSolicits.setDescription('The maximum number of multicast Neighbor Solicitations sent during neighbor resolution or during NUD (neighbor unreachability detection).')
agentIpv6NUDBackoffMultiple = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 1, 30, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIpv6NUDBackoffMultiple.setStatus('current')
if mibBuilder.loadTexts: agentIpv6NUDBackoffMultiple.setDescription('The exponential backoff multiple to be used in the calculation of the next timeout value for Neighbor Solicitation transmission during NUD (neighbor unreachability detection) following the exponential backoff algorithm. The next timeout value is limited to a maximum value of 60 seconds if the value with exponential backoff calculation is greater than 60 seconds.')
mibBuilder.exportSymbols("EdgeSwitch-ROUTING6-MIB", agentIpv6StaticRoutePreference=agentIpv6StaticRoutePreference, agentDhcp6ClientInterfaceIndex=agentDhcp6ClientInterfaceIndex, agentDhcp6ClientParametersTable=agentDhcp6ClientParametersTable, agentIpv6ServicePortNbrUpdated=agentIpv6ServicePortNbrUpdated, agentIpv6EcmpGroupsHigh=agentIpv6EcmpGroupsHigh, agentIpv6AddrAddress=agentIpv6AddrAddress, agentIpv6StaticRouteTable=agentIpv6StaticRouteTable, agentIpv6InterfaceDhcpClient=agentIpv6InterfaceDhcpClient, agentIpv6NetworkPortNbrEntry=agentIpv6NetworkPortNbrEntry, agentIpv6RouterAdvertisementMaxAdvertisementInterval=agentIpv6RouterAdvertisementMaxAdvertisementInterval, agentIpv6RouterAdvertisementReachableTime=agentIpv6RouterAdvertisementReachableTime, agentIpv6StaticRoutePfxLength=agentIpv6StaticRoutePfxLength, agentIpv6OspfInterRoutes=agentIpv6OspfInterRoutes, agentIpv6EbgpRoutes=agentIpv6EbgpRoutes, agentIpv6EcmpRetries=agentIpv6EcmpRetries, agentIpv6NetworkPortNbrType=agentIpv6NetworkPortNbrType, agentIpv6RouterAdvertisementHopLimitUnspecifiedMode=agentIpv6RouterAdvertisementHopLimitUnspecifiedMode, agentIpv6InterfaceIfIndex=agentIpv6InterfaceIfIndex, agentIpv6ServicePortNbrIsRouter=agentIpv6ServicePortNbrIsRouter, agentIpv6UniqueNextHopsHigh=agentIpv6UniqueNextHopsHigh, agentIpv6NUDBackoffMultiple=agentIpv6NUDBackoffMultiple, agentIpv6StaticRouteDest=agentIpv6StaticRouteDest, agentIpv6TotalRoutes=agentIpv6TotalRoutes, agentIpv6RouterAdvertisementNbrSolicitInterval=agentIpv6RouterAdvertisementNbrSolicitInterval, agentIpv6ServicePortDefaultRouterEntry=agentIpv6ServicePortDefaultRouterEntry, agentIpv6InterfaceEntry=agentIpv6InterfaceEntry, agentIpv6UnresolvedRouteAdds=agentIpv6UnresolvedRouteAdds, agentIpv6IcmpControlGroup=agentIpv6IcmpControlGroup, agentIpv6StaticRouteIfIndex=agentIpv6StaticRouteIfIndex, agentIpv6AddrPrefixOnLinkFlag=agentIpv6AddrPrefixOnLinkFlag, agentIpv6StaticRouteStatus=agentIpv6StaticRouteStatus, agentIpv6InterfaceLinkLocalOnly=agentIpv6InterfaceLinkLocalOnly, agentIpv6InterfaceAutoconfig=agentIpv6InterfaceAutoconfig, agentIpv6RouterAdvertisementEntry=agentIpv6RouterAdvertisementEntry, agentIpv6OspfExt2Routes=agentIpv6OspfExt2Routes, agentIpv6NbrCfgEntry=agentIpv6NbrCfgEntry, agentIpv6NetworkPortNbrCfgPhysAddr=agentIpv6NetworkPortNbrCfgPhysAddr, agentIpv6AddrTable=agentIpv6AddrTable, agentIpv6AddrPrefixAdvValidLifetime=agentIpv6AddrPrefixAdvValidLifetime, agentIpv6AddrPrefixAutonomousFlag=agentIpv6AddrPrefixAutonomousFlag, agentIpv6ServicePortNbrCfgEntry=agentIpv6ServicePortNbrCfgEntry, agentIpv6AddrPrefixTable=agentIpv6AddrPrefixTable, agentIpv6IcmpRateLimitBurstSize=agentIpv6IcmpRateLimitBurstSize, agentIpv6NetworkPortNbrPhysAddr=agentIpv6NetworkPortNbrPhysAddr, agentIpv6StaticRouteEntry=agentIpv6StaticRouteEntry, agentIpv6InterfaceTable=agentIpv6InterfaceTable, agentIpv6ServicePortNbrEntry=agentIpv6ServicePortNbrEntry, agentIpv6NetworkPortNbrIsRouter=agentIpv6NetworkPortNbrIsRouter, agentIpv6ServicePortNbrPhysAddr=agentIpv6ServicePortNbrPhysAddr, agentIpv6NetworkPortNbrCfgAddr=agentIpv6NetworkPortNbrCfgAddr, agentIpv6ServicePortDefaultRouterIndex=agentIpv6ServicePortDefaultRouterIndex, agentIpv6Group=agentIpv6Group, agentIpv6EcmpRouteCount=agentIpv6EcmpRouteCount, agentDhcp6ClientPrefix=agentDhcp6ClientPrefix, agentIpv6ServicePortNbrCfgTable=agentIpv6ServicePortNbrCfgTable, agentIpv6NetworkPortNbrCfgEntry=agentIpv6NetworkPortNbrCfgEntry, agentIpv6InvalidRouteAdds=agentIpv6InvalidRouteAdds, agentIpv6ServicePortNbrType=agentIpv6ServicePortNbrType, 
agentIpv6RouterAdvertisementSuppressMode=agentIpv6RouterAdvertisementSuppressMode, agentIpv6NUDMaxMulticastSolicits=agentIpv6NUDMaxMulticastSolicits, agentIpv6ServicePortDefaultRouter=agentIpv6ServicePortDefaultRouter, agentIpv6RoutingTableSummaryGroup=agentIpv6RoutingTableSummaryGroup, agentIpv6ServicePortPrefixIndex=agentIpv6ServicePortPrefixIndex, agentDhcp6ClientIAID=agentDhcp6ClientIAID, agentIpv6ServicePortGroup=agentIpv6ServicePortGroup, agentIpv6AddrPrefixLength=agentIpv6AddrPrefixLength, agentIpv6NbrCfgAddr=agentIpv6NbrCfgAddr, agentIpv6AddrStatus=agentIpv6AddrStatus, agentIpv6ServicePortPrefixEntry=agentIpv6ServicePortPrefixEntry, agentIpv6BgpRoutes=agentIpv6BgpRoutes, agentIpv6UniqueNextHops=agentIpv6UniqueNextHops, agentIpv6IbgpRoutes=agentIpv6IbgpRoutes, agentIpv6RouterAdvertisementAdvertisementLifetime=agentIpv6RouterAdvertisementAdvertisementLifetime, agentIpv6NUDMaxUnicastSolicits=agentIpv6NUDMaxUnicastSolicits, agentIpv66to4Routes=agentIpv66to4Routes, agentIpv6RouterAdvertisementManagedFlag=agentIpv6RouterAdvertisementManagedFlag, agentIpv6NbrCfgTable=agentIpv6NbrCfgTable, agentIpv6NbrCfgEntryStatus=agentIpv6NbrCfgEntryStatus, agentIpv6NeighborsDynamicRenew=agentIpv6NeighborsDynamicRenew, agentIpv6ServicePortNbrCfgAddr=agentIpv6ServicePortNbrCfgAddr, agentIpv6EcmpCountEntry=agentIpv6EcmpCountEntry, agentIpv6NbrCfgPhysAddr=agentIpv6NbrCfgPhysAddr, agentIpv6ServicePortPrefixTable=agentIpv6ServicePortPrefixTable, agentDhcp6ClientValidLifeTime=agentDhcp6ClientValidLifeTime, agentIpv6RouteAdds=agentIpv6RouteAdds, agentDhcp6ClientRenewTime=agentDhcp6ClientRenewTime, agentIpv6EcmpCountTable=agentIpv6EcmpCountTable, agentIpv6OspfRoutes=agentIpv6OspfRoutes, agentDhcp6ClientParametersEntry=agentDhcp6ClientParametersEntry, agentIpv6ServicePortNbrTable=agentIpv6ServicePortNbrTable, agentIpv6AlternateRoutes=agentIpv6AlternateRoutes, agentIpv6AddrPfxLength=agentIpv6AddrPfxLength, agentIpv6ServicePortPrefix=agentIpv6ServicePortPrefix, agentIpv6RouterAdvertisementIfIndex=agentIpv6RouterAdvertisementIfIndex, agentIpv6ServicePortNbrCfgEntryStatus=agentIpv6ServicePortNbrCfgEntryStatus, agentIpv6NetworkPortNbrTable=agentIpv6NetworkPortNbrTable, agentIpv6TruncEcmpRoutes=agentIpv6TruncEcmpRoutes, agentIpv6OspfIntraRoutes=agentIpv6OspfIntraRoutes, agentIpv6RoutingMode=agentIpv6RoutingMode, agentIpv6NetworkPortNbrUpdated=agentIpv6NetworkPortNbrUpdated, PYSNMP_MODULE_ID=fastPathRoutingIpv6, agentIpv6AddrEui64Flag=agentIpv6AddrEui64Flag, agentIpv6IcmpRateLimitInterval=agentIpv6IcmpRateLimitInterval, agentIpv6NetworkPortNbrAddr=agentIpv6NetworkPortNbrAddr, agentIpv6NetworkPortNbrState=agentIpv6NetworkPortNbrState, agentIpv6EcmpNextHopCount=agentIpv6EcmpNextHopCount, agentIpv6InterfaceIcmpUnreachables=agentIpv6InterfaceIcmpUnreachables, agentIpv6RejectRoutes=agentIpv6RejectRoutes, agentIpv6UnresolvedDataRateLimit=agentIpv6UnresolvedDataRateLimit, agentDhcp6ClientExpireTime=agentDhcp6ClientExpireTime, agentIpv6NetworkPortGroup=agentIpv6NetworkPortGroup, agentIpv6EcmpRoutes=agentIpv6EcmpRoutes, agentIpv6NetworkPortNbrCfgEntryStatus=agentIpv6NetworkPortNbrCfgEntryStatus, agentDhcp6ClientPrefixlength=agentDhcp6ClientPrefixlength, agentIpv6AddrPrefixAdvPreferredLifetime=agentIpv6AddrPrefixAdvPreferredLifetime, agentIpv6ConnectedRoutes=agentIpv6ConnectedRoutes, agentIpv6NextHopGroups=agentIpv6NextHopGroups, agentDhcp6ClientT1Time=agentDhcp6ClientT1Time, agentIpv6StaticRoutes=agentIpv6StaticRoutes, agentDhcp6ClientState=agentDhcp6ClientState, agentIpv6AddrPrefixEntry=agentIpv6AddrPrefixEntry, 
agentDhcp6ClientT2Time=agentDhcp6ClientT2Time, agentIpv6ServicePortNbrAddr=agentIpv6ServicePortNbrAddr, agentIpv6ReservedLocals=agentIpv6ReservedLocals, agentIpv6LocalBgpRoutes=agentIpv6LocalBgpRoutes, agentIpv6BestRoutes=agentIpv6BestRoutes, agentIpv6InterfaceIcmpRedirects=agentIpv6InterfaceIcmpRedirects, agentIpv6BestRoutesHigh=agentIpv6BestRoutesHigh, agentIpv6InterfaceMtuValue=agentIpv6InterfaceMtuValue, agentIpv6ServicePortPrefixLength=agentIpv6ServicePortPrefixLength, agentIpv6RouterAdvertisementOtherFlag=agentIpv6RouterAdvertisementOtherFlag, agentIpv6RouterAdvertisementTable=agentIpv6RouterAdvertisementTable, agentIpv6ServicePortNbrState=agentIpv6ServicePortNbrState, agentIpv6RouteDeletes=agentIpv6RouteDeletes, fastPathRoutingIpv6=fastPathRoutingIpv6, agentIpv6ServicePortDefaultRouterTable=agentIpv6ServicePortDefaultRouterTable, agentIpv6NetworkPortNbrCfgTable=agentIpv6NetworkPortNbrCfgTable, agentIpv6NextHopGroupsHigh=agentIpv6NextHopGroupsHigh, agentIpv6FailedRouteAdds=agentIpv6FailedRouteAdds, agentIpv6ServicePortNbrCfgPhysAddr=agentIpv6ServicePortNbrCfgPhysAddr, agentIpv6AddrPrefix=agentIpv6AddrPrefix, agentIpv6StaticRouteNextHop=agentIpv6StaticRouteNextHop, agentIpv6RouteModifies=agentIpv6RouteModifies, agentDhcp6ClientServerDUID=agentDhcp6ClientServerDUID, agentIpv6InterfaceDadTransmits=agentIpv6InterfaceDadTransmits, agentIpv6EcmpGroups=agentIpv6EcmpGroups, agentIpv6IfIndex=agentIpv6IfIndex, agentIpv6AddrEntry=agentIpv6AddrEntry, agentIpv6OspfExt1Routes=agentIpv6OspfExt1Routes, agentDhcp6ClientPreferredLifeTime=agentDhcp6ClientPreferredLifeTime)
|
py | 1a390969f3441df2b9d75d6bd67f9cdce66d9155 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 14:04:34 2021
@author: mesar
"""
import numpy as np
import pandas as pd
from lib import utils
from progressbar import progressbar as pbar
from pathlib import Path
import itertools
from time import time
import csv
if __name__ == "__main__":
Path("../data/model_build_outputs/all_prob_matrices_heading").mkdir(parents=True, exist_ok=True)
Path("../data/model_build_outputs/prob_matrices_heading").mkdir(parents=True, exist_ok=True)
Path("../data/model_build_outputs/prob_matrices_angle").mkdir(parents=True, exist_ok=True)
print("Calculating train_routes")
#train_routes = utils.get_train_routes()
routes = utils.get_routes()
hroutes = np.unique(routes.route_fid)
all_zroutes = utils.get_routes_as_zones()
zroutes = all_zroutes[all_zroutes.route_fid.isin(hroutes)]
print("Done reading routes")
t0 = time()
max_distances = [50, 100, 150, 200, 250, 300]
dwks = [0.01, 0.05, 0.1, 0.15]
r = []
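    # Grid search: for every (max_distance, dwk) pair, build the zone heading
    # field, estimate zone headings, and record the mean cosine similarity,
    # the fraction of zones covered and the elapsed time.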
for max_distance, dwk in itertools.product(max_distances, dwks):
tt = time()
#print ("\n----------\n%3d"%max_distance, "%.2f"%dwk, end=" || ", flush=True)
za = utils.ZrouteField(zroutes, max_distance=max_distance).compute_field(dwk=dwk, use_pbar=True)
h = za.get_estimated_headings(use_pbar=True)
rr = za.heading_estimations_cosdistance(h)
rr['max_distance'] = max_distance
rr['dwk'] = dwk
rr['zones_estimated'] = np.mean(h.cos_distance!=0)
rr['time'] = time()-t0
rr['nroutes'] = len(np.unique(za.zroutes.route_fid))
t0 = time()
r.append(rr)
print ("maxd %3d, "%max_distance, "dwk %.2f, "%dwk, f'time {time()-tt:.4f}, cos_sim {rr["cos_distance_mean"]:.4f}', flush=True)
r = pd.DataFrame(r)
r.to_hdf("../data/model_build_outputs/md_dkw_exploration.hdf", "data")
dwks = np.sort(np.unique(r.dwk))
max_distances = np.sort(np.unique(r.max_distance))
csims = np.zeros((len(dwks), len(max_distances)))
zcovered = np.zeros((len(dwks), len(max_distances)))
for i,dwk in enumerate(dwks):
for j,max_distance in enumerate(max_distances):
k = r[(r.max_distance==max_distance)&(r.dwk==dwk)].iloc[0]
csims[i,j] = k.cos_distance_mean
zcovered[i,j] = k.zones_estimated
for distance in max_distances:
k = r[r.max_distance==distance]
print(k)
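    # Pick the combination with the best mean cosine similarity among those
    # covering the target fraction of zones; relax the coverage requirement
    # in steps of 0.1 until at least one candidate exists.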
estimated_zones_value = 1.0
best_options = r[r.zones_estimated >= estimated_zones_value]
if not best_options.empty:
best_options = r[r.zones_estimated >= estimated_zones_value]
best_combination = best_options[best_options.cos_distance_mean == best_options.cos_distance_mean.max()]
selected_max_distance = best_combination.max_distance.values[0]
selected_dwk = best_combination.dwk.values[0]
while best_options.empty:
print("Empty for value: " + str(estimated_zones_value))
estimated_zones_value = estimated_zones_value - 0.1
best_options = r[r.zones_estimated >= estimated_zones_value]
best_combination = best_options[best_options.cos_distance_mean == best_options.cos_distance_mean.max()]
selected_max_distance = best_combination.max_distance.values[0]
selected_dwk = best_combination.dwk.values[0]
print(selected_max_distance)
print(selected_dwk)
output_path = "../data/model_build_outputs/best_max_distance.csv"
with open(output_path, "w") as file:
writer = csv.writer(file, delimiter=',')
writer.writerow([selected_max_distance])
output_path = "../data/model_build_outputs/best_dwk.csv"
with open(output_path, "w") as file:
writer = csv.writer(file, delimiter=',')
writer.writerow([selected_dwk])
print("Max distance: " + str(selected_max_distance))
print("dwk: " + str(selected_dwk))
print("Calculating train_routes")
train_routes = utils.get_routes()
print("Calculating train_zroutes")
train_zroutes = utils.get_routes_as_zones()
print("Calculating z_route_fields")
za = utils.ZrouteField(train_zroutes, max_distance=selected_max_distance).compute_field(dwk=selected_dwk)
print("Calculating heading_matrices")
h = za.get_estimated_headings(zroutes=train_zroutes)
fname = f'../data/model_build_outputs/heading_estimations_md_{selected_max_distance}_dwk_{selected_dwk:.4f}.hdf'
h.to_hdf(fname, "data")
#h = pd.read_hdf("../data/model_apply_outputs/heading_estimations_md_200_dwk_0.1000.hdf")
zroutes = train_zroutes.copy()
print("Calculating prob_matrices")
for route_fid in pbar(np.unique(h.route_fid)):
probs = utils.get_heading_based_probmatrix(h, route_fid)
probs = probs[~probs.index.str.contains("Station")]
#probs.drop(probs.filter(regex='Station').columns, axis=1, inplace=True)
probs.to_csv(f"../data/model_build_outputs/prob_matrices_heading/{route_fid}_probs.csv", sep=',', na_rep='nan')
zones_id = zroutes.zone_id[zroutes.route_fid==route_fid]
zones_id = zones_id[~zones_id.str.contains("Station")]
zones_id.reset_index(inplace=True, drop=True)
cities = zroutes.city[zroutes.route_fid==route_fid]
cities.reset_index(inplace=True, drop=True)
city = cities[0]
city_size = len(city) + 2
        zones_id = [zones_id[i][city_size:] for i in range(0,len(zones_id))] # Starts from 1 to skip the Depot
zones_df = pd.Series(zones_id)
zones_df = zones_df.append(pd.Series("nan"))
zones_df.to_csv(f"../data/model_build_outputs/prob_matrices_heading/{route_fid}_zroutes.csv", index=False, header=False, na_rep='nan')
prob_matrix = utils.get_angle_based_probmatrix(h, route_fid)
prob_matrix.to_csv(f"../data/model_build_outputs/prob_matrices_angle/{route_fid}_probs.csv", sep=',', na_rep='nan')
#probs.to_hdf(f"data/prob_matrices_based_on_heading/{route_fid}_probs.hdf", "data")
print("Done")
|
py | 1a3909a92b76027c50c9615618d1741d7c9b22db | import matplotlib
import re
import custom_style
from custom_style import setup_columns,col,remove_chart_junk
import matplotlib.pyplot as plt
import sys
import numpy as np
from matplotlib.ticker import FuncFormatter
import math
from collections import defaultdict
from matplotlib.patches import Patch
import scipy.special
from scipy.special import lambertw
lb_1_name = "micro_balancer_make_batch.dat"
lb_2_name = "micro_balancer_match_resps.dat"
suboram_name = "micro_suboram_batch_sz.dat"
labels = ["Load balancer (make batch)", "SubORAM (process batch)", "Load balancer (match responses)"]
#colors=[custom_style.mix_colors[2], custom_style.hash_colors[4], custom_style.hash_colors[1], custom_style.hash_colors[0]]
colors=["#FFCA3E","#FF6F50","#D03454"]
suborams = 1
data_size = 2**10
def getLoadBalancerData(filename):
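    # Parse whitespace-separated rows of "<suborams> <requests> <latency>",
    # converting the raw latency column (divided by 1e6) to seconds.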
results = []
f1 = open(filename, "r")
lines_1 = f1.readlines()
for i in range(len(lines_1)):
elems_1 = lines_1[i].split()
result = {
"suborams": int(elems_1[0]),
"requests": int(elems_1[1]),
"latency": (float(elems_1[2])) / 1000000.0,
}
results.append(result)
f1.close()
return results
def getSuboramData():
results = []
with open(suboram_name, "r") as f:
lines = f.readlines()
for line in lines:
elems = line.split()
result = {
"data_size": int(elems[0]),
"batch": int(elems[1]),
"latency": float(elems[2]) / 1000.0,
}
results.append(result)
return results
def f(N, n_suborams, secparam=128):
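    # Bound on the per-suboram batch size: mu is the mean load per suboram and
    # epsilon is obtained via the Lambert W function (inverting a Chernoff-style
    # tail bound) so that a single suboram exceeds mu*(1+epsilon) requests only
    # with negligible probability (~2^-secparam).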
mu = N / n_suborams
alpha = math.log(n_suborams * (2 ** secparam))
rhs = alpha / (math.e * mu) - 1 / math.e
branch = 0
epsilon = math.e ** (lambertw(rhs, branch) + 1) - 1
#epsilon = (alpha + math.sqrt(2 * mu * alpha)) / mu # uncomment for looser bound
#print(alpha, rhs, lambertw(rhs, 0), lambertw(rhs, 1))
#print("bound", suborams, secparam, alpha, rhs, lambertw(rhs), epsilon)
return mu * (1 + epsilon)
def getLoadBalancerLatencyForParams(data, suborams, requests):
for elem in data:
if elem["suborams"] == suborams and elem["requests"] == requests:
return elem["latency"]
print(("load balancer out-of-bounds params: no latency for params suborams=%d, requests=%d") % (suborams, requests))
return -1.0
def getSuboramLatencyForParams(data, data_size, batch):
for elem in data:
if elem["data_size"] == data_size and elem["batch"] == batch:
return elem["latency"]
print(("suboram out-of-bounds params: no latency for params data_size=%d, batch=%d") % (data_size, batch))
return -1.0
def roundUpPow2(x):
return 2 ** (math.ceil(math.log(x,2)))
def makeBreakdownFig(in_name, out_name, data_size, title, args):
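    # Stacked-area breakdown of processing time versus number of requests:
    # load-balancer batch creation, suboram batch processing, and
    # load-balancer response matching.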
lb_1_data = getLoadBalancerData(lb_1_name)
lb_2_data = getLoadBalancerData(lb_2_name)
suboram_data = getSuboramData()
lb1_plt = []
lb2_plt = []
suboram_plt = []
reqs_plt = [2**i for i in range(6,11)]
for reqs in reqs_plt:
lb1_plt.append(getLoadBalancerLatencyForParams(lb_1_data,suborams,reqs) * 1000)
lb2_plt.append(getLoadBalancerLatencyForParams(lb_2_data,suborams,reqs) * 1000)
batch_size_rounded = roundUpPow2(f(reqs,suborams))
suboram_plt.append(getSuboramLatencyForParams(suboram_data,data_size,reqs) * 1000)
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(111)
ax.stackplot(reqs_plt, lb1_plt, suboram_plt, lb2_plt, labels=labels, colors=colors)
#ax.stackplot(np.arange(10, 110, step=10), y[0], y[1], y[2], y[3], labels=labels, colors=colors)
ax.set_xlabel("Requests")
ax.set_ylabel("Process time (ms)")
#ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xticks([2**6, 2**8, 2**10])
ax.set_xticklabels(["$2^6$", "$2^8$", "$2^{10}$"])
#ax.set_title(title, fontsize=8)
print("updated")
#plt.legend()
#ax.spines['left'].set_position("zero")
#ax.spines['bottom'].set_position("zero")
remove_chart_junk(plt,ax,lightGrid=True,below=False)
pgf_with_pdflatex = {
"pgf.texsystem": "pdflatex",
"pgf.preamble": [
r"""
% \input{../fonts}
\usepackage[T1]{fontenc}
\newcommand\hmmax{0}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{mathptmx}
""",
],
"text.usetex": True,
"font.family": "serif",
"font.serif": [],
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 7,
"font.size": 10,
"legend.fontsize": 7,
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"lines.markersize": 3,
"lines.markeredgewidth": 0,
"axes.linewidth": 0.5,
}
matplotlib.rcParams.update(pgf_with_pdflatex)
#ax.yaxis.grid(which='major', color='0.9', linestyle='dotted')
if args.title:
ax.set_title(args.title, y=1.5)
if args.large:
plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0)
custom_style.save_fig(fig, out_name, [2.5, 3], pad=0.3)
else:
custom_style.save_fig(fig, out_name, [1.3, 1.4])
#custom_style.save_fig(fig, out_name, [3.25, 1.8])
#plt.show()
|
py | 1a390aa5b08355655dffe5d1e8ead6a93d418d45 | #!/usr/bin/env python3
#
# Copyright (C) 2018 Bloomberg LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from buildgrid._version import __version__
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("BuildGrid requires Python >= 3.5")
sys.exit(1)
try:
from setuptools import setup, find_packages, Command
except ImportError:
print("BuildGrid requires setuptools in order to build. Install it using"
" your package manager (usually python3-setuptools) or via pip (pip3"
" install setuptools).")
sys.exit(1)
class BuildGRPC(Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = 'build gRPC protobuf modules'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import grpc_tools.command
except ImportError:
print("BuildGrid requires grpc_tools in order to build gRPC modules.\n"
"Install it via pip (pip3 install grpcio-tools).")
exit(1)
protos_root = 'buildgrid/_protos'
grpc_tools.command.build_package_protos(protos_root)
# Postprocess imports in generated code
for root, _, files in os.walk(protos_root):
for filename in files:
if filename.endswith('.py'):
path = os.path.join(root, filename)
with open(path, 'r') as f:
code = f.read()
# All protos are in buildgrid._protos
code = re.sub(r'^from ', r'from buildgrid._protos.',
code, flags=re.MULTILINE)
# Except for the core google.protobuf protos
code = re.sub(r'^from buildgrid._protos.google.protobuf', r'from google.protobuf',
code, flags=re.MULTILINE)
with open(path, 'w') as f:
f.write(code)
# Load main requirements from file:
with open('requirements.txt') as requirements_file:
install_requirements = requirements_file.read().splitlines()
auth_requirements = []
# Load 'auth' requirements from dedicated file:
if os.path.isfile('requirements.auth.txt'):
with open('requirements.auth.txt') as requirements_file:
auth_requirements = requirements_file.read().splitlines()
docs_requirements = []
# Load 'docs' requirements from dedicated file:
if os.path.isfile('requirements.docs.txt'):
with open('requirements.docs.txt') as requirements_file:
docs_requirements = requirements_file.read().splitlines()
tests_requirements = []
# Load 'tests' requirements from dedicated file:
if os.path.isfile('requirements.tests.txt'):
with open('requirements.tests.txt') as requirements_file:
tests_requirements = requirements_file.read().splitlines()
db_requirements = []
# Load 'db' requirements from dedicated file:
if os.path.isfile('requirements.db.txt'):
with open('requirements.db.txt') as requirements_file:
db_requirements = requirements_file.read().splitlines()
redis_requirements = []
# Load 'redis' requirements from dedicated file:
if os.path.isfile('requirements.redis.txt'):
with open('requirements.redis.txt') as requirements_file:
redis_requirements = requirements_file.read().splitlines()
setup(
name="BuildGrid",
version=__version__,
license="Apache License, Version 2.0",
description="A remote execution service",
cmdclass={
'build_grpc': BuildGRPC, },
packages=find_packages(),
package_data={'buildgrid.server.persistence.sql': ['alembic/*', 'alembic/**/*']},
python_requires='>= 3.5.3', # janus requirement
install_requires=install_requirements,
setup_requires=['pytest-runner'],
tests_require=tests_requirements,
extras_require={
'auth': auth_requirements,
'database': db_requirements,
'redis': redis_requirements,
'docs': docs_requirements,
'tests': tests_requirements, },
entry_points={
'console_scripts': [
'bgd = buildgrid._app:cli',
]
}
)
|
py | 1a390b63fb4fcfe9c014449a813cf668d8db2a61 | # -*- coding: utf-8 -*-
import os
import sys
import argparse
import time
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, main_path)
def call1(dataset, num_chunks, args):
call1_ = "python -m sklearnex fea.1.mmm_bopf_repr_fit.py "
call1_ += "{0} {1} --timestamp={2} --n_jobs={3} --num_chunks={4}".format(
dataset, args.config_file, args.timestamp, args.n_jobs, num_chunks
)
return call1_
def call2(dataset, num_chunks, args):
call2_ = "python -m sklearnex fea.2.mmm_bopf_repr_transform.py "
call2_ += "{0} {1} --timestamp={2} --n_jobs={3} --num_chunks={4}".format(
dataset, args.config_file, args.timestamp, args.n_jobs, num_chunks
)
return call2_
def call3(dataset, num_chunks, args):
call2_ = "python -m sklearnex fea.3.mmm_bopf_compact_fit.py "
call2_ += "{0} {1} --timestamp={2} --num_chunks={3}".format(dataset, args.config_file, args.timestamp, num_chunks)
return call2_
def call4(dataset, num_chunks, args):
call3_ = "python -m sklearnex fea.4.mmm_bopf_compact_transform.py "
call3_ += "{0} --timestamp={1} --num_chunks={2}".format(dataset, args.timestamp, num_chunks)
return call3_
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"config_file",
help="filename for method MMMBOPF configuration"
)
parser.add_argument(
'-fit',
'--fit_dataset',
default="plasticc_train",
help='Name of the dataset to fit.',
)
parser.add_argument(
'-transform',
'--transform_dataset',
nargs="+",
default=["plasticc_test", "plasticc_augment"],
help='List of datasets to Transform.'
)
parser.add_argument(
'-n_chunks',
'--num_chunks',
nargs="+",
default=[1, 100, 20],
help='The number of chunks to divide each dataset in order. '
'First num_chunk for fit_dataset, and the rest for transform_dataset',
)
parser.add_argument(
"-t",
"--timestamp",
type=str,
default=time.strftime("%Y%m%d-%H%M%S"),
help="timestamp for creating unique files"
)
parser.add_argument(
"-c",
"--compact_method",
type=str,
default="LSA",
help="The compact method to use, options are: LSA or MANOVA"
)
parser.add_argument(
'-n_jobs',
"--n_jobs",
type=int,
default=-1,
help="The number of process to run in parallel"
)
# RUNNING EXAMPLE
# python fea.pipeline.py optimal_config_lsa.json -fit plasticc_train -transform plasticc_test plasticc_augment_v3 -n_chunks 1 100 10 -c LSA -n_jobs 6
args = parser.parse_args()
c = args.compact_method # LSA MANOVA
# print("RUNNING 1.mmm_bopf_repr_fit.py for compact_method=%s, dataset=%s" % (c, args.fit_dataset))
# os.system(call1(args.fit_dataset, args.num_chunks[0], args))
# for dataset, num_chunks in zip(args.transform_dataset, args.num_chunks[1:]):
# print("RUNNING 2.mmm_bopf_repr_transform.py for compact_method=%s, dataset=%s" % (c, dataset))
# os.system(call2(dataset, int(num_chunks), args))
print("RUNNING 3.mmm_bopf_compact_fit.py for compact_method=%s, dataset=%s" % (c, args.fit_dataset))
os.system(call3(args.fit_dataset, args.num_chunks[0], args))
for dataset, num_chunks in zip(args.transform_dataset, args.num_chunks[1:]):
print("RUNNING 4.mmm_bopf_compact_transform.py for compact_method=%s, dataset=%s" % (c, dataset))
os.system(call4(dataset, num_chunks, args))
print("DONE!!")
print("TIMESTAMP: ", args.timestamp)
# RUNING EXAMPLE
# python pipeline.py plasticc_train plasticc_test optimal_config_lsa.json --compact_method=LSA --train_num_chunks=1 --test_num_chunks=200
# python pipeline.py plasticc_train plasticc_test optimal_config_lsa.json --compact_method=LSA --train_num_chunks=1 --test_num_chunks=100 --timestamp=20210916-035944 --n_jobs=6
# python pipeline.py plasticc_augment_v3 plasticc_test optimal_config_lsa.json --compact_method=LSA --train_num_chunks=10 --test_num_chunks=100 --n_jobs=6
|
py | 1a390ba4f1480db5e8e8d98602e71cee7b85e787 | import sys
class Dummy:
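    # Any attribute lookup returns self.call, so every method invoked on a
    # Dummy raises an AttributeError describing the call site, the method
    # name and the argument types.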
def __init__(self,identity = 'unknown'):
self.identity = identity
self.name = ''
def call(self,*args):
method = self.name + "("
count = 1
for o in args:
if count != 1:
method = method + ","
method = method + repr(type(o))
count = count + 1
method = method + ")"
try:
raise "Dummy"
except:
line = 'Line ' +repr(sys.exc_info()[2].tb_frame.f_back.f_lineno)+': '
raise AttributeError(line + method+" called on dummy "+self.identity+" Object\n")
def __getattr__(self, name):
self.name = name
return self.call
if __name__ == '__main__':
try:
rect = ''
rect = Dummy('Rectangle')#try also after commenting this line
rect.GetWidth()
rect.SetHeight(50)
rect.SetColor('Red')
except AttributeError,e:
print e
|
py | 1a390c0f1612a0f7d31c57d98eeb14f09ec5fe81 | from typing import TypeVar, Generic
from pydantic.generics import GenericModel
TypeX = TypeVar('TypeX')
TypeY = TypeVar('TypeY')
TypeZ = TypeVar('TypeZ')
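# BaseClass is generic in (TypeX, TypeY); ChildClass fixes TypeX to int,
# keeps TypeY open and introduces an extra parameter TypeZ.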
class BaseClass(GenericModel, Generic[TypeX, TypeY]):
x: TypeX
y: TypeY
class ChildClass(BaseClass[int, TypeY], Generic[TypeY, TypeZ]):
z: TypeZ
# Replace TypeY by str
print(ChildClass[str, int](x=1, y='y', z=3))
|
py | 1a390c140808fb5bf09d099673be0871e7ca0247 | ##############################################################################
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is #
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #
# KIND, express or implied. See the License for the specific language #
# governing permissions and limitations under the License. #
##############################################################################
from moto import mock_ssm
from utils.logger import Logger
from manifest.cfn_params_handler import CFNParamsHandler
from aws.services.ssm import SSM
log_level = 'info'
logger = Logger(loglevel=log_level)
cph = CFNParamsHandler(logger)
def test_update_alfred_ssm():
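    # A keyword with the 'alfred_ssm_' prefix that has no backing SSM parameter:
    # the handler should still report it as a parameter lookup (param_flag True).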
keyword_ssm = 'alfred_ssm_not_exist_alfred_ssm'
value_ssm = 'parameter_store_value'
value_ssm, param_flag = cph._update_alfred_ssm(
keyword_ssm, value_ssm, False)
assert param_flag is True
|
py | 1a390c5133cda8ef004b9d8fb89af9f2806c8f00 | #!/usr/bin/python
import argparse
import json
from collections import defaultdict
import sys
def parse_args():
parser = argparse.ArgumentParser(description='Read JSON lab data and generate statistics.')
parser.add_argument(
'--json-file', dest='json_file_string',
required=True,
help = 'JSON file describing jobs, from pulpito',
)
parser.add_argument(
'--suites', dest='suites_string',
required=True,
help = 'suites to grab statistics on, as a comma-separated string',
)
parser.add_argument(
'--machine-types', dest='machine_types_string',
required=True,
help = 'machine types to include, as a comma-separated string'
)
parser.add_argument(
'--include-failed', help='do not exclude non-passed runs',
action='store_true'
)
args = parser.parse_args()
args.suites = args.suites_string.split(',')
args.machine_types = args.machine_types_string.split(',')
def stringcheck(strings):
for entry in strings:
if not isinstance(entry, basestring):
print "'{entry}'' is not a string".format(entry=entry)
sys.exit(1)
stringcheck(args.suites)
stringcheck(args.machine_types)
return args
def parse_json_to_dict(ctx, data):
try:
json_data = json.loads(data)
except ValueError, e:
raise ValueError('could not parse json data')
d = defaultdict(dict) # suite -> run -> list of jobs
including = 0
for record in json_data:
if (record['suite'] in ctx.suites) and \
(record['status'] == "pass" or ctx.include_failed):
including += 1
run_name = record['job'].split('/')[0]
if not run_name in d[record['suite']]:
d[record['suite']][run_name] = list()
d[record['suite']][run_name].append(record)
print "filtered out {num} results for {suites}".format(num=including, suites=ctx.suites_string)
return d
def sum_data(suite_data):
"""suite_data: run -> [job1, job2, ...]
Returns two-element tuple; dict of suite names to total machine times, and
dict of job descriptions to list of runs
"""
suite_run_results = {}
job_results = defaultdict(list) # description -> [job1, job2, ...]
for run_name, jobs in suite_data.iteritems():
run_machine_time_sum = 0
for job in jobs:
run_machine_time_sum += job['duration'] * job['nodes']
job_results[job['description']].append(job)
suite_run_results[run_name] = run_machine_time_sum
return (suite_run_results, job_results)
def combine_job_results(job_results):
"""job_results: description -> [job1, job2, ...]
Returns a dict of job description -> tuple(total machine time, num runs, num machines)
"""
averaged_results = {} # description -> (total machine runtime, num runs, num machines)
for description, jobs in job_results.iteritems():
total_machine_time = 0
num_job_runs = 0
num_machines = 0
warned_on_change = False
for job in jobs:
total_machine_time += job['duration'] * job['nodes']
num_job_runs += 1
            if num_machines != 0 and num_machines != job['nodes'] and not warned_on_change:
print "{desc} changed required machine number".format(desc=description)
warned_on_change = True
num_machines = job['nodes']
averaged_results[description] = (total_machine_time, num_job_runs, num_machines)
return averaged_results
def print_suite_stats(suite_totals):
total_time = 0
largest_time = ("", 0)
run_count = 0
for run_name, time in suite_totals.iteritems():
total_time += time
if time > largest_time[1]:
largest_time = (run_name, time)
run_count += 1
print "Average machine runtime: {time} seconds".format(time=total_time/run_count)
print "Longest machine runtime: {name} in {time} seconds".format(
name=largest_time[0], time=largest_time[1] )
def print_job_stats(job_results):
print "(machine time, number of runs, machines used):description"
results_list = list()
for job_name, results in job_results.iteritems():
list_tuple = (job_name, results)
results_list.append(list_tuple)
results_list.sort(key=lambda result: int(result[1][0])/result[1][1])
for result_tuple in results_list:
results = result_tuple[1]
average = results[0]/results[1]
print "({avg},{num},{mcount}):{name}".format(name=result_tuple[0],avg=average,num=results[1],mcount=results[2])
if __name__ == '__main__':
ctx = parse_args()
try:
json_stream = open(ctx.json_file_string).read()
except IOError as e:
print 'cannot open %s' % json_stream
print e
sys.exit(1)
try:
suite_data = parse_json_to_dict(ctx, json_stream)
except ValueError, e:
print e
sys.exit(1)
for (suite_name, suite_results) in suite_data.iteritems():
(suite_total_times, job_results) = sum_data(suite_results)
combined_job_results = combine_job_results(job_results)
print "********** Results for suite {name} **********".format(name=suite_name)
print_suite_stats(suite_total_times)
print " ***** Job results ***** "
print_job_stats(combined_job_results)
|
py | 1a390c70a8d8d10a88ddd9f7eadba14c23eeb402 | """"Tests for generators_coroutines_*.py"""
from unittest import TestCase, main, mock
from generators_coroutines_1 import (
CustomException,
DBHandler,
stream_data,
stream_db_records,
)
from generators_coroutines_2 import auto_stream_db_records
from generators_coroutines_2 import stream_db_records as stream_db_records_2
class BaseTestCase(TestCase):
def setUp(self):
self.info = mock.patch("log.logger.info").start()
self.handler = DBHandler("test")
def tearDown(self):
self.info.stop()
class TestClose(BaseTestCase):
"""Tests for generators_coroutines_1"""
def test_close_called(self):
streamer = stream_db_records(self.handler)
rows = next(streamer)
streamer.close()
self.assertEqual(len(rows), 10)
self.assertTrue(self.handler.is_closed)
class TestThrow(BaseTestCase):
"""Tests for generators_coroutines_1"""
def test_throw_controlled_exception(self):
streamer = stream_data(self.handler)
self.assertEqual(len(next(streamer)), 10)
streamer.throw(CustomException)
self.assertEqual(len(next(streamer)), 10)
def test_unhandled_exception(self):
streamer = stream_data(self.handler)
self.assertEqual(len(next(streamer)), 10)
with self.assertRaises(StopIteration):
streamer.throw(RuntimeError)
self.assertTrue(self.handler.is_closed)
class TestStreamer(BaseTestCase):
"""Tests for generators_coroutines_2.stream_db_records."""
def test_default_value(self):
streamer = stream_db_records_2(self.handler)
none = next(streamer)
rows = next(streamer)
self.assertIsNone(none)
self.assertEqual(len(rows), 10)
def test_with_fixed_value(self):
streamer = stream_db_records_2(self.handler)
none = next(streamer)
rows = streamer.send(20)
self.assertIsNone(none)
self.assertEqual(len(rows), 20)
def test_multiple_values(self):
streamer = stream_db_records_2(self.handler)
none = next(streamer)
default_len = next(streamer)
self.assertIsNone(none)
self.assertEqual(len(default_len), 10)
self.assertEqual(len(streamer.send(20)), 20, "provided length of 20")
self.assertEqual(len(streamer.send(15)), 15, "provided length of 15")
self.assertEqual(
len(next(streamer)), 15, "no length provided use previous = 15"
)
self.assertEqual(
len(streamer.send(None)), 15, "nothing sent, use previous"
)
self.assertEqual(len(streamer.send(7)), 7, "new provided length")
def test_first_call_fixed_value(self):
streamer = stream_db_records_2(self.handler)
self.assertIsNone(next(streamer))
self.assertEqual(len(streamer.send(1)), 1)
class TestStreamer2(BaseTestCase):
"""Tests for generators_coroutines_2.auto_stream_db_records."""
def test_default_value(self):
streamer = auto_stream_db_records(self.handler)
rows = next(streamer)
self.assertEqual(len(rows), 10)
def test_with_fixed_value(self):
streamer = auto_stream_db_records(self.handler)
rows = streamer.send(20)
self.assertEqual(len(rows), 20)
def test_multiple_values(self):
streamer = auto_stream_db_records(self.handler)
default_len = next(streamer)
self.assertEqual(len(default_len), 10)
self.assertEqual(len(streamer.send(20)), 20, "provided length of 20")
self.assertEqual(len(streamer.send(15)), 15, "provided length of 15")
self.assertEqual(
len(next(streamer)), 15, "no length provided use previous = 15"
)
self.assertEqual(
len(streamer.send(None)), 15, "nothing sent, use previous"
)
self.assertEqual(len(streamer.send(7)), 7, "new provided length")
def test_first_call_fixed_value(self):
streamer = auto_stream_db_records(self.handler)
self.assertEqual(len(streamer.send(1)), 1)
if __name__ == "__main__":
main()
|
py | 1a390d11b28a7d6a188d93d76d16a3818bfe07cb | from matplotlib.pyplot import figure
from numpy import array, zeros
from scipy.integrate import solve_ivp
from .dynamics import Dynamics
from ..util import default_fig
class SystemDynamics(Dynamics):
"""Abstract dynamics class for simulation.
Override eval_dot.
"""
def __init__(self, n, m):
"""Create a SystemDynamics object.
Inputs:
Number of states, n: int
Number of actions, m: int
"""
self.n = n
self.m = m
def eval(self, x, t):
return x
def step(self, x_0, u_0, t_0, t_f, atol=1e-6, rtol=1e-6):
"""Simulate system from initial state with constant action over a
time interval.
Approximated using Runge-Kutta 4,5 solver.
Inputs:
Initial state, x_0: numpy array
Control action, u_0: numpy array
Initial time, t_0: float
Final time, t_f: float
Absolute tolerance, atol: float
Relative tolerance, rtol: float
Outputs:
State at final time: numpy array
"""
x_dot = lambda t, x: self.eval_dot(x, u_0, t)
t_span = [t_0, t_f]
res = solve_ivp(x_dot, t_span, x_0, atol=atol, rtol=rtol)
return res.y[:, -1]
def simulate(self, x_0, controller, ts, processed=True, atol=1e-6, rtol=1e-6):
"""Simulate system from initial state with specified controller.
Approximated using Runge-Kutta 4,5 solver.
Actions computed at time steps and held constant over sample period.
Inputs:
Initial state, x_0: numpy array
Control policy, controller: Controller
Time steps, ts: numpy array
Flag to process actions, processed: bool
Absolute tolerance, atol: float
Relative tolerance, rtol: float
Outputs:
State history: numpy array
Action history: numpy array
"""
#print("Dimension",self.n)
#print("State",x_0)
assert len(x_0) == self.n
N = len(ts)
xs = zeros((N, self.n))
us = [None] * (N - 1)
controller.reset()
xs[0] = x_0
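        # Zero-order hold: the action computed at each time step is held
        # constant while integrating the dynamics to the next time step.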
for j in range(N - 1):
x = xs[j]
t = ts[j]
u = controller.eval(x, t)
us[j] = u
u = controller.process(u)
xs[j + 1] = self.step(x, u, t, ts[j + 1])
if processed:
us = array([controller.process(u) for u in us])
return xs, us
def plot_timeseries(self, ts, data, fig=None, ax=None, title=None, labels=None):
fig, ax = default_fig(fig, ax)
if title is not None:
ax.set_title(title, fontsize=16)
ax.set_xlabel('$t$ (sec)', fontsize=16)
ax.plot(ts, data, linewidth=3)
if labels is not None:
ax.legend(labels, fontsize=16)
return fig, ax
def plot_states(self, ts, xs, fig=None, ax=None, labels=None):
if labels is None:
labels = [f'$x_{i}$' for i in range(self.n)]
return self.plot_timeseries(ts, xs, fig, ax, 'States', labels)
def plot_actions(self, ts, us, fig=None, ax=None, labels=None):
if labels is None:
labels = [f'$u_{j}$' for j in range(self.m)]
return self.plot_timeseries(ts[:-1], us, fig, ax, 'Actions', labels)
def plot(self, xs, us, ts, fig=None, state_labels=None, action_labels=None):
if fig is None:
fig = figure(figsize=(12, 6), tight_layout=True)
state_ax = fig.add_subplot(1, 2, 1)
fig, state_ax = self.plot_states(ts, xs, fig, state_ax, state_labels)
action_ax = fig.add_subplot(1, 2, 2)
fig, action_ax = self.plot_actions(ts, us, fig, action_ax, action_labels)
return fig, (state_ax, action_ax)
|
py | 1a390db26588097071f421aaad2dbc8e112bd620 | import os
from fnmatch import fnmatch
import pickle
# General Processing
import numpy as np
import pandas as pd
import collections
# DECOMPOSITION
from sklearn.decomposition import NMF
from scipy.linalg import svd
# NLU
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1 as NLUV1
# from ibm_watson.natural_language_understanding_v1 import \
# Features, ConceptsOptions, EntitiesOptions, KeywordsOptions
# Presentation / apps
import seaborn as sns
# GENERAL FUNCTIONS
# SELECTION
def random_split(lst, split=0.5):
shuffled = np.array(lst)
np.random.shuffle(shuffled)
split = int(split * len(shuffled))
return shuffled[-split:], shuffled[:-split]
# NORMALIZATION
def norm_stat(vec, weights=False):
'''
Normalizes a vector v-v.mean())/v.std()
'''
if weights:
return np.mean(abs(vec - vec.mean()))
return (vec-vec.mean())/vec.std()
# Algebraic normalization - dot product
def norm_dot(vec, weights=False):
'''
Normalizes a vector - dot product: v @ v = 1
'''
if weights:
return np.sqrt(vec @ vec)
return vec / np.sqrt(vec @ vec)
# Algebraic normalization - dot product
def norm_sum(vec, weights=False):
'''
Normalizes a vector - sum: v.sum = 1
'''
if weights:
return vec.sum()
return vec / vec.sum()
# Scaled Normalization -
def scale(vec, weights=False):
'''
Normalizes a vector: v.min = 0, v.max = 1
'''
stop_divide_by_zero = 0.00000001
if weights:
return (vec.max()-vec.min() + stop_divide_by_zero)
return (vec-vec.min())/(vec.max()-vec.min() + stop_divide_by_zero)
def cleanup_chars(string, char_list=('\n', ' ')):
result = string
for char in char_list:
result = result.replace(char, '')
return result
# Matrix dot product
def dotdf(df1, df2):
'''
performs df1 @ df2 without exceptions, when df1.columns and df2.index
are not identical
'''
c = set(df1.columns)
i = set(df2.index)
var = list(c - (c - i))
return df1[var] @ df2.loc[var]
# OS system commands
def ls(search, name_only=False, cos=None):
'''
emulates unix ls (without flags). Accepts wildcard/'*'
'''
search_split = search.replace('/', '/ ').split()
pattern = search_split[-1]
path = ''.join(search_split[:-1])
if cos is None:
# look in filesystem
# numpy array enables Boolean Mask
all_names = np.array(os.listdir(path))
else:
# look in cloud object store
all_names = np.array(cos.get_bucket_contents())
if not name_only and cos is None:
# add path to each name
all_names = np.array([path+name for name in all_names])
mask = [fnmatch(name, pattern) for name in all_names]
result = all_names[mask]
return result
# MATRIX-FACTORIZATION: DIMENSIONALITY REDUCTION & ARCHETYPING
# CLUSTER FEATURES INTO OCCUPATION CATEGORIES
# Use non-zero matrix factorization for clustering
# Use singular value decomposition first state for determining overall
# similarity
class Archetypes:
'''
Archetypes: Performs NMF of order n on X and stores the result as
attributes.
Archetypes are normalized: cosine similarity a(i) @ a(i) = 1.
Atributes:
my_archetypes.n - order / number of archetypes
my_archetypes.X - input matrix
my_archetypes.model - NMF model
my_archetypes.w - NMF w-matrix
my_archetypes.h - NMF h-matrix
my_archetypes.f - features x archetypes matrix (from h-matrix)
my_archetypes.fn - Dot-Normalized archetypes
my_archetypes.o - documents x archetypes matrix (from w-matrix)
my_archetypes.on - Sum-Normalized documents
'''
def __init__(self, X, n,
norm=norm_dot,
bootstrap=False, bootstrap_frac=0.5,
random_state=None):
self.n = n
self.X = X
self.norm = norm
self.random_state = random_state
if bootstrap:
self.bootstrap_n = bootstrap
self.bootstrap_frac = bootstrap_frac
else:
self.bootstrap_n = 1
self.bootstrap_frac = 1
self.model = NMF(
n_components=n,
init='random',
random_state=self.random_state,
max_iter=1000,
tol=0.0000001
)
self.w_dic = {}
self.o_dic = {}
self.h_dic = {}
self.f_dic = {}
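        # Fit one NMF per bootstrap sample; keep the document factors (w/o)
        # and the feature factors (h/f) of every fit.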
for j in range(self.bootstrap_n):
XX = self.X.sample(int(len(self.X) * self.bootstrap_frac))
self.w_dic[j] = self.model.fit_transform(XX)
self.o_dic[j] = pd.DataFrame(self.w_dic[j], index=XX.index)
self.h_dic[j] = self.model.components_
self.f_dic[j] = pd.DataFrame(self.h_dic[j], columns=XX.columns)
self.w = self.w_dic[0] # TEMPORARY
self.o = self.o_dic[0] # TEMPORARY
self.h = self.h_dic[0] # TEMPORARY
self.f = self.f_dic[0] # TEMPORARY
self.fn = self.f.T.apply(norm_dot).T
self.on = self.o.T.apply(norm_sum).T
class Svd:
'''
Singular value decomposition-as-an-object
my_svd = Svd(X) returns
my_svd.u/.s/.vt – U S and VT from the Singular Value Decomposition
(see manual)
my_svd.f – Pandas.DataFrame: f=original features x svd_features
my_svd.o - Pandas.DataFrame: o=occupations x svd_features
my_svd.volume(keep_volume)
                         - collections.namedtuple ('dotted dictionary'):
Dimensionality reduction. keeps 'keep_volume' of
total variance
'''
def __init__(self, X):
self.u, self.s, self.vt = svd(np.array(X))
self.f = pd.DataFrame(self.vt, columns=X.columns)
self.o = pd.DataFrame(self.u, columns=X.index)
def volume(self, keep_volume):
'''
Dimensionality reduction, keeps 'keep_volume' proportion of
original variance
Type: collections.namedtuple ('dotted dictionary')
Examples of usage:
my_svd.volume(0.9).s - np.array: eigenvalues for 90% variance
my_svd.volume(0.8).f - dataframe: features for 80% variance
my_svd.volume(0.5).o - dataframe: occupations for 50% variance
'''
dotted_dic = collections.namedtuple('dotted_dic', 's f o')
a1 = self.s.cumsum()
a2 = a1/a1[-1]
n_max = np.argmin(np.square(a2 - keep_volume))
cut_dic = dotted_dic(
s=self.s[:n_max],
f=self.f.iloc[:n_max],
o=self.o.iloc[:n_max]
)
return cut_dic
class WatsonDocumentArchetypes:
'''
WatsonDocumentArchetypes performs Archetypal Analysis on a corpus
consisting of a set of documents, for example a set
of articles, books, news stories or medical dictations.
Input parameters:
PATH - Dictionary with paths to I/O
PATH['data'] - Directory for input text files.
Example: './data/input_texts/'
PATH['results'] - Directory for output.
Example: './data/output_nlu/'
NLU - Dictionary with information for running Watson NLU
NLU['apikey'] - apikey for running Watson NLU
NLU['apiurl'] - URL for Watson NLU API
NLU['version'] - Watson NLU version, e.g. '2019-07-12'
NLU['features'] - Features requested from Watson NLU for each
document in the set, e.g.
Features(
categories= CategoriesOptions(),
concepts = ConceptsOptions(),
entities = EntitiesOptions(),
keywords = KeywordsOptions(),
relations = RelationsOptions(),
syntax = SyntaxOptions()
)
Attributes:
self.PATH
'''
def __init__(self, PATH, NLU,
train_test=False,
random_state=None,
use_cloud_store=False):
from cloud_object_store import CloudObjectStore
self.PATH = PATH
self.NLU = NLU
self.random_state = random_state
        # To randomly partition documents into train/test sets,
        # choose the relative size of the test set, train_test (1 = 100%)
self.train_test = train_test
self.use_cloud_store = use_cloud_store
# Create clients to interface Watson and Cloud services
authenticator = IAMAuthenticator(NLU['apikey'])
self.nlu_model = NLUV1(
version=NLU['version'], authenticator=authenticator
)
self.nlu_model.set_service_url(NLU['apiurl'])
if self.use_cloud_store:
self.cos_dictations = CloudObjectStore(
PATH['dictation_bucket'],
PATH['cos_dictation_apikey'],
PATH['cos_dictation_crn'],
PATH['cos_dictation_endpoint']
)
self.cos_nlu = CloudObjectStore(
PATH['nlu_bucket'],
PATH['cos_nlu_apikey'],
PATH['cos_nlu_crn'],
PATH['cos_nlu_endpoint']
)
# Initiate X_matrix dictionaries
self.X_matrix_dic = {}
self.X_matrix_train_dic = {}
self.X_matrix_test_dic = {}
self.archetypes_dic = {}
self.svd_dic = {}
# PREPARE DATA
if self.use_cloud_store:
# load from cloud storage bucket
self.filenames = ls(
'*.txt', name_only=True, cos=self.cos_dictations
)
else:
# load from local file system
# all filenames ending with '.txt'
self.filenames = ls(self.PATH['data']+'*.txt', name_only=True)
self.names = [name.replace('.txt', '') for name in self.filenames]
# if train_test - self.names will be set to self.names_train
self.all_names = self.names * 1
# dictionary for dictation files
self.dictation_dic = {}
for name in self.filenames:
if (self.use_cloud_store):
self.dictation_dic[name.replace('.txt', '')] = \
self.cos_dictations.get_item(name).decode('utf-8')
else:
self.dictation_dic[name.replace('.txt', '')] = \
open(self.PATH['data']+name, encoding="utf-8").read()
self.dictation_df = pd.Series(self.dictation_dic)
# TRAIN-TEST SPLIT
if self.train_test:
# 0<train_test<1 - the proportion of names to save as 'test'
# (rounded downwards)
self.names_test, self.names_train = random_split(
self.all_names, self.train_test
)
self.names = self.names_train
# PERFORM WATSON NLU ANALYSIS
# IF DICTATION ALREADY HAS PKL WITH Watson NLU:
# READ EXISTING PKL. SKIP NEW WATSON CALC.
# Dictionary with Watson-NLU results for each dictation
self.watson = {}
if self.use_cloud_store:
# Check in Cloud storage bucket
self.watson_pkl = 'all_dictations_nlu.pkl'
pkl_exists = self.watson_pkl in self.cos_nlu.get_bucket_contents()
else:
# Check in local filesystem
self.watson_pkl = PATH['results']+'all_dictations_nlu.pkl'
pkl_exists = os.path.exists(self.watson_pkl)
if pkl_exists:
if self.use_cloud_store:
# load previous result from Cloud storage
self.watson = pickle.loads(
self.cos_nlu.get_item(self.watson_pkl)
)
else:
# load previous result from local filesystem
self.watson = pickle.load(open(self.watson_pkl, "rb"))
else:
# perform nlu-analysis on dictations
for item in list(self.dictation_dic.items()):
lbl = item[0]
text = item[1]
self.watson[lbl] = self.nlu_model.analyze(
text=text, features=NLU['features']
)
if self.use_cloud_store:
# save result to Cloud storage
self.cos_nlu.create_item(
str(lbl)+'_nlu.pkl',
pickle.dumps(self.watson[lbl])
)
else:
# save result to local filesystem
f = open(PATH['results']+str(lbl)+'_nlu.pkl', 'wb')
pickle.dump(self.watson[lbl], f)
f.close()
if self.use_cloud_store:
# save result to Cloud storage
self.cos_nlu.create_item(
self.watson_pkl, pickle.dumps(self.watson)
)
else:
f = open(self.watson_pkl, 'wb')
pickle.dump(self.watson, f)
f.close()
# Copy Watson NLU results to Pandas Dataframes
self.watson_nlu = {}
for dctn in self.watson.items():
self.watson_nlu[dctn[0]] = {}
for item in list(dctn[1].result.items()):
self.watson_nlu[dctn[0]][item[0]] = \
pd.DataFrame(list(item[1]))
# ARCHETYPAL ANALYSIS
# CONSTRUCT X- MATRIX
def X_matrix(self, typ='entities'):
'''
Construct the archetypal analysis X-matrix by pivoting the dataframe
in the dictionary my_wda.watson_nlu that contains the Watson NLU
analysis in question.
X_matrix(typ)
rows : Dictations
columns: Variables; keywords/entities/concepts, from Watson NLU
analysis
values : Weights, from Watson NLU analysis
The constructed X_matrix(typ) is saved as X_matrix_dic[typ]
        If my_wda.train_test has a value (not False), X_matrix_train_dic[typ]
        and X_matrix_test_dic[typ] are computed and added to their
        respective dictionaries.
'''
if typ not in self.X_matrix_dic.keys():
df = pd.DataFrame()
for key in self.names:
dfx = self.watson_nlu[key][typ].copy()
dfx['dictation'] = key
df = df.append(dfx, sort=True)
if typ == 'entities':
df = df[df['type'] == 'HealthCondition']
df.rename({'relevance': 'rel0'}, axis=1, inplace=True)
df['relevance'] = df['rel0'] * df['confidence']
self.X_matrix_dic[typ] = df.pivot_table(
index='dictation', columns='text', values='relevance'
).fillna(0)
if self.train_test:
self.X_matrix_train_dic[typ] = self.X_matrix_dic[typ]
df = pd.DataFrame()
for key in self.names_test:
dfx = self.watson_nlu[key][typ].copy()
dfx['dictation'] = key
df = df.append(dfx, sort=True)
if typ == 'entities':
df = df[df['type'] == 'HealthCondition']
df.rename({'relevance': 'rel0'}, axis=1, inplace=True)
df['relevance'] = df['rel0'] * df['confidence']
self.X_matrix_test_dic[typ] = df.pivot_table(
index='dictation', columns='text', values='relevance'
).fillna(0)
return self.X_matrix_dic[typ]
# CALCULATE ARCHETYPES
def archetypes(self, typ='entities',
n_archs=6, bootstrap=False,
bootstrap_frac=0.5,
random_state=False,
norm=norm_sum):
if random_state is False:
random_state = self.random_state
if typ not in self.archetypes_dic.keys():
self.archetypes_dic[typ] = {}
hyperparam = (n_archs, bootstrap, bootstrap_frac, random_state, norm)
self.X_matrix(typ)
self.archetypes_dic[typ][hyperparam] = Archetypes(
self.X_matrix(typ), n_archs, bootstrap=bootstrap,
bootstrap_frac=bootstrap_frac, random_state=random_state,
norm=norm
)
return self.archetypes_dic[typ][hyperparam]
def display_archetype(self, arch_nr=-1, typ='entities',
n_archs=6, var='variables',
threshold=0.10, norm=scale):
fun = {
'variables': 'self.archetypes(typ = typ,n_archs = n_archs).f.T ',
'dictations': 'self.archetypes(typ = typ,n_archs = n_archs).o'
}
f = eval(fun[var])
fn = f.apply(norm)
if arch_nr == -1:
return sns.clustermap(f).data2d
else:
arc = fn.sort_values(by=arch_nr, ascending=False)
            # norm_sum normalization: threshold is the cumulative tail volume to ignore
if norm is norm_sum:
arc_cs = arc[arch_nr].cumsum()
thresh_idx = abs(arc_cs - (1 - threshold)).values.argmin()
result = arc.iloc[:thresh_idx]
if norm is scale:
result = arc[
arc[arch_nr] >= (threshold * arc[arch_nr][0])
]
return result
# CALCULATE SVD
def svd(self, typ='entities'):
self.X_matrix(typ)
self.svd_dic[typ] = Svd(self.X_matrix(typ))
return
# ANALYZE A TEXT
def analyze(self, text, typ='entities'):
pass
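

# Hedged illustration: X_matrix() above builds its document-term matrix with a
# pandas pivot like the one below (rows = dictations, columns = NLU terms,
# values = relevance, 0 when absent). The toy values are invented purely for
# demonstration; pandas is assumed to be imported as pd at the top of this module.
if __name__ == "__main__":
    demo = pd.DataFrame({
        'dictation': ['doc1', 'doc1', 'doc2'],
        'text': ['fever', 'cough', 'fever'],
        'relevance': [0.9, 0.4, 0.7],
    })
    X_demo = demo.pivot_table(index='dictation', columns='text',
                              values='relevance').fillna(0)
    print(X_demo)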
|
py | 1a390f399daf68d8b0762a1950d9fb1584640d75 | # case where generator doesn't intercept the thrown/injected exception
def gen():
yield 123
yield 456
g = gen()
print(next(g))
try:
g.throw(KeyError)
except KeyError:
print('got KeyError from downstream!')
# case where a thrown exception is caught and stops the generator
def gen():
try:
yield 1
yield 2
except:
pass
g = gen()
print(next(g))
try:
g.throw(ValueError)
except StopIteration:
print('got StopIteration')
# generator ignores a thrown GeneratorExit (this is allowed)
def gen():
try:
yield 123
except GeneratorExit:
print('GeneratorExit')
yield 456
# thrown a class
g = gen()
print(next(g))
print(g.throw(GeneratorExit))
# thrown an instance
g = gen()
print(next(g))
print(g.throw(GeneratorExit()))
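# Hedged companion example (illustrative addition): throw(GeneratorExit) above
# may be ignored by the generator, but close() is stricter -- a generator that
# keeps yielding after GeneratorExit makes close() raise RuntimeError.
def gen():
    try:
        yield 123
    except GeneratorExit:
        print('GeneratorExit')
    yield 456

g = gen()
print(next(g))
try:
    g.close()
except RuntimeError:
    print('got RuntimeError from close()')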
|
py | 1a390ff32f553249704e0afb0c0ecf69104c549c | # pyOCD debugger
# Copyright (c) 2018-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .constants import (Commands, Status, SWD_FREQ_MAP, JTAG_FREQ_MAP)
from ...core import exceptions
from ...coresight import dap
from ...utility import conversion
from ...utility.mask import bfx
import logging
import struct
import six
import threading
from enum import Enum
LOG = logging.getLogger(__name__)
class STLink(object):
"""!
@brief STLink V2 and V3 command-level interface.
"""
class Protocol(Enum):
"""!
@brief Protocol options to pass to STLink.enter_debug() method.
"""
SWD = 1
JTAG = 2
## Maximum number of bytes to send or receive for 32- and 16- bit transfers.
#
# 8-bit transfers have a maximum size of the maximum USB packet size (64 bytes for full speed).
MAXIMUM_TRANSFER_SIZE = 1024
## Minimum required STLink firmware version (hw version 2).
MIN_JTAG_VERSION = 24
## Firmware version that adds 16-bit transfers (hw version 2).
MIN_JTAG_VERSION_16BIT_XFER = 26
## Firmware version that adds multiple AP support (hw version 2).
MIN_JTAG_VERSION_MULTI_AP = 28
## Firmware version that adds DP bank support.
#
# Keys are the hardware version, value is the minimum JTAG version.
MIN_JTAG_VERSION_DPBANKSEL = {2: 32, 3: 2}
## Port number to use to indicate DP registers.
DP_PORT = 0xffff
## Map to convert from STLink error response codes to exception classes.
_ERROR_CLASSES = {
# AP protocol errors
Status.SWD_AP_WAIT: exceptions.TransferTimeoutError,
Status.SWD_AP_FAULT: exceptions.TransferFaultError,
Status.SWD_AP_ERROR: exceptions.TransferError,
Status.SWD_AP_PARITY_ERROR: exceptions.TransferError,
# DP protocol errors
Status.SWD_DP_WAIT: exceptions.TransferTimeoutError,
Status.SWD_DP_FAULT: exceptions.TransferFaultError,
Status.SWD_DP_ERROR: exceptions.TransferError,
Status.SWD_DP_PARITY_ERROR: exceptions.TransferError,
# High level transaction errors
Status.SWD_AP_WDATA_ERROR: exceptions.TransferFaultError,
Status.SWD_AP_STICKY_ERROR: exceptions.TransferError,
Status.SWD_AP_STICKYORUN_ERROR: exceptions.TransferError,
}
## These errors indicate a memory fault.
_MEM_FAULT_ERRORS = (
Status.JTAG_UNKNOWN_ERROR, # Returned in some cases by older STLink firmware.
Status.SWD_AP_FAULT,
Status.SWD_DP_FAULT,
Status.SWD_AP_WDATA_ERROR,
Status.SWD_AP_STICKY_ERROR,
)
def __init__(self, device):
self._device = device
self._hw_version = 0
self._jtag_version = 0
self._version_str = None
self._target_voltage = 0
self._protocol = None
self._lock = threading.RLock()
def open(self):
with self._lock:
self._device.open()
self.enter_idle()
self.get_version()
self.get_target_voltage()
def close(self):
with self._lock:
self.enter_idle()
self._device.close()
def get_version(self):
# GET_VERSION response structure:
# Byte 0-1:
# [15:12] Major/HW version
# [11:6] JTAG/SWD version
# [5:0] SWIM or MSC version
# Byte 2-3: ST_VID
# Byte 4-5: STLINK_PID
response = self._device.transfer([Commands.GET_VERSION], readSize=6)
ver, = struct.unpack('>H', response[:2])
# TODO create version bitfield constants
self._hw_version = bfx(ver, 15, 12)
self._jtag_version = bfx(ver, 11, 6)
self._msc_version = bfx(ver, 5, 0)
# For STLinkV3 we must use the extended get version command.
if self._hw_version >= 3:
# GET_VERSION_EXT response structure (byte offsets):
# 0: HW version
# 1: SWIM version
# 2: JTAG/SWD version
# 3: MSC/VCP version
# 4: Bridge version
# 5-7: reserved
# 8-9: ST_VID
# 10-11: STLINK_PID
response = self._device.transfer([Commands.GET_VERSION_EXT], readSize=12)
hw_vers, _, self._jtag_version, self._msc_version = struct.unpack('<4B', response[0:4])
self._version_str = "V%dJ%dM%d" % (self._hw_version, self._jtag_version, self._msc_version)
LOG.debug("STLink probe %s firmware version: %s", self.serial_number, self._version_str)
# Check versions.
if self._jtag_version == 0:
raise exceptions.ProbeError("%s firmware does not support JTAG/SWD. Please update"
"to a firmware version that supports JTAG/SWD" % (self._version_str))
if not self._check_version(self.MIN_JTAG_VERSION):
raise exceptions.ProbeError("STLink %s is using an unsupported, older firmware version. "
"Please update to the latest STLink firmware. Current version is %s, must be at least version v2J%d.)"
% (self.serial_number, self._version_str, self.MIN_JTAG_VERSION))
def _check_version(self, min_version):
return (self._hw_version >= 3) or (self._jtag_version >= min_version)
@property
def vendor_name(self):
return self._device.vendor_name
@property
def product_name(self):
return self._device.product_name
@property
def serial_number(self):
return self._device.serial_number
@property
def hw_version(self):
return self._hw_version
@property
def jtag_version(self):
return self._jtag_version
@property
def version_str(self):
return self._version_str
@property
def target_voltage(self):
return self._target_voltage
@property
def supports_banked_dp(self):
"""! @brief Whether the firmware version supports accessing banked DP registers.
This property is not valid until the connection is opened.
"""
return self._jtag_version >= self.MIN_JTAG_VERSION_DPBANKSEL[self._hw_version]
def get_target_voltage(self):
response = self._device.transfer([Commands.GET_TARGET_VOLTAGE], readSize=8)
a0, a1 = struct.unpack('<II', response[:8])
self._target_voltage = 2 * a1 * 1.2 / a0 if a0 != 0 else None
def enter_idle(self):
with self._lock:
response = self._device.transfer([Commands.GET_CURRENT_MODE], readSize=2)
if response[0] == Commands.DEV_DFU_MODE:
self._device.transfer([Commands.DFU_COMMAND, Commands.DFU_EXIT])
elif response[0] == Commands.DEV_JTAG_MODE:
self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_EXIT])
elif response[0] == Commands.DEV_SWIM_MODE:
self._device.transfer([Commands.SWIM_COMMAND, Commands.SWIM_EXIT])
self._protocol = None
def set_swd_frequency(self, freq=1800000):
with self._lock:
if self._hw_version >= 3:
self.set_com_frequency(self.Protocol.JTAG, freq)
else:
for f, d in SWD_FREQ_MAP.items():
if freq >= f:
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.SWD_SET_FREQ, d], readSize=2)
self._check_status(response)
return
raise exceptions.ProbeError("Selected SWD frequency is too low")
def set_jtag_frequency(self, freq=1120000):
with self._lock:
if self._hw_version >= 3:
self.set_com_frequency(self.Protocol.JTAG, freq)
else:
for f, d in JTAG_FREQ_MAP.items():
if freq >= f:
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_SET_FREQ, d], readSize=2)
self._check_status(response)
return
raise exceptions.ProbeError("Selected JTAG frequency is too low")
def get_com_frequencies(self, protocol):
assert self._hw_version >= 3
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.GET_COM_FREQ, protocol.value - 1]
response = self._device.transfer(cmd, readSize=52)
self._check_status(response[0:2])
freqs = conversion.byte_list_to_u32le_list(response[4:52])
currentFreq = freqs.pop(0)
freqCount = freqs.pop(0)
return currentFreq, freqs[:freqCount]
def set_com_frequency(self, protocol, freq):
assert self._hw_version >= 3
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.SET_COM_FREQ, protocol.value - 1, 0]
cmd.extend(conversion.u32le_list_to_byte_list([freq // 1000]))
response = self._device.transfer(cmd, readSize=8)
self._check_status(response[0:2])
freqs = conversion.byte_list_to_u32le_list(response[4:8])
return freqs[0]
def enter_debug(self, protocol):
with self._lock:
self.enter_idle()
if protocol == self.Protocol.SWD:
protocolParam = Commands.JTAG_ENTER_SWD
elif protocol == self.Protocol.JTAG:
protocolParam = Commands.JTAG_ENTER_JTAG_NO_CORE_RESET
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_ENTER2, protocolParam, 0], readSize=2)
self._check_status(response)
self._protocol = protocol
def open_ap(self, apsel):
with self._lock:
if not self._check_version(self.MIN_JTAG_VERSION_MULTI_AP):
return
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_INIT_AP, apsel, Commands.JTAG_AP_NO_CORE]
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def close_ap(self, apsel):
with self._lock:
if not self._check_version(self.MIN_JTAG_VERSION_MULTI_AP):
return
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_CLOSE_AP_DBG, apsel]
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def target_reset(self):
with self._lock:
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_DRIVE_NRST, Commands.JTAG_DRIVE_NRST_PULSE], readSize=2)
self._check_status(response)
def drive_nreset(self, isAsserted):
with self._lock:
value = Commands.JTAG_DRIVE_NRST_LOW if isAsserted else Commands.JTAG_DRIVE_NRST_HIGH
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_DRIVE_NRST, value], readSize=2)
self._check_status(response)
def _check_status(self, response):
status, = struct.unpack('<H', response)
if status != Status.JTAG_OK:
error_message = Status.get_error_message(status)
if status in self._ERROR_CLASSES:
raise self._ERROR_CLASSES[status](error_message)
else:
raise exceptions.ProbeError(error_message)
def _clear_sticky_error(self):
with self._lock:
if self._protocol == self.Protocol.SWD:
self.write_dap_register(self.DP_PORT, dap.DP_ABORT,
dap.ABORT_ORUNERRCLR | dap.ABORT_WDERRCLR | dap.ABORT_STKERRCLR | dap.ABORT_STKCMPCLR)
elif self._protocol == self.Protocol.JTAG:
self.write_dap_register(self.DP_PORT, dap.DP_CTRL_STAT,
dap.CTRLSTAT_STICKYERR | dap.CTRLSTAT_STICKYCMP | dap.CTRLSTAT_STICKYORUN)
def _read_mem(self, addr, size, memcmd, max, apsel):
with self._lock:
result = []
while size:
thisTransferSize = min(size, max)
cmd = [Commands.JTAG_COMMAND, memcmd]
cmd.extend(six.iterbytes(struct.pack('<IHB', addr, thisTransferSize, apsel)))
result += self._device.transfer(cmd, readSize=thisTransferSize)
addr += thisTransferSize
size -= thisTransferSize
# Check status of this read.
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_GETLASTRWSTATUS2], readSize=12)
status, _, faultAddr = struct.unpack('<HHI', response[0:8])
# Handle transfer faults specially so we can assign the address info.
if status != Status.JTAG_OK:
error_message = Status.get_error_message(status)
if status in self._MEM_FAULT_ERRORS:
# Clear sticky errors.
self._clear_sticky_error()
exc = exceptions.TransferFaultError("read")
exc.fault_address = faultAddr
exc.fault_length = thisTransferSize - (faultAddr - addr)
raise exc
elif status in self._ERROR_CLASSES:
raise self._ERROR_CLASSES[status](error_message)
elif status != Status.JTAG_OK:
raise exceptions.ProbeError(error_message)
return result
def _write_mem(self, addr, data, memcmd, max, apsel):
with self._lock:
while len(data):
thisTransferSize = min(len(data), max)
thisTransferData = data[:thisTransferSize]
cmd = [Commands.JTAG_COMMAND, memcmd]
cmd.extend(six.iterbytes(struct.pack('<IHB', addr, thisTransferSize, apsel)))
self._device.transfer(cmd, writeData=thisTransferData)
addr += thisTransferSize
data = data[thisTransferSize:]
# Check status of this write.
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_GETLASTRWSTATUS2], readSize=12)
status, _, faultAddr = struct.unpack('<HHI', response[0:8])
# Handle transfer faults specially so we can assign the address info.
if status != Status.JTAG_OK:
error_message = Status.get_error_message(status)
if status in self._MEM_FAULT_ERRORS:
# Clear sticky errors.
self._clear_sticky_error()
exc = exceptions.TransferFaultError("write")
exc.fault_address = faultAddr
exc.fault_length = thisTransferSize - (faultAddr - addr)
raise exc
elif status in self._ERROR_CLASSES:
raise self._ERROR_CLASSES[status](error_message)
elif status != Status.JTAG_OK:
raise exceptions.ProbeError(error_message)
def read_mem32(self, addr, size, apsel):
assert (addr & 0x3) == 0 and (size & 0x3) == 0, "address and size must be word aligned"
return self._read_mem(addr, size, Commands.JTAG_READMEM_32BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def write_mem32(self, addr, data, apsel):
assert (addr & 0x3) == 0 and (len(data) & 3) == 0, "address and size must be word aligned"
self._write_mem(addr, data, Commands.JTAG_WRITEMEM_32BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def read_mem16(self, addr, size, apsel):
assert (addr & 0x1) == 0 and (size & 0x1) == 0, "address and size must be half-word aligned"
if not self._check_version(self.MIN_JTAG_VERSION_16BIT_XFER):
# 16-bit r/w is only available from J26, so revert to 8-bit accesses.
return self.read_mem8(addr, size, apsel)
return self._read_mem(addr, size, Commands.JTAG_READMEM_16BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def write_mem16(self, addr, data, apsel):
assert (addr & 0x1) == 0 and (len(data) & 1) == 0, "address and size must be half-word aligned"
if not self._check_version(self.MIN_JTAG_VERSION_16BIT_XFER):
# 16-bit r/w is only available from J26, so revert to 8-bit accesses.
self.write_mem8(addr, data, apsel)
return
self._write_mem(addr, data, Commands.JTAG_WRITEMEM_16BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def read_mem8(self, addr, size, apsel):
return self._read_mem(addr, size, Commands.JTAG_READMEM_8BIT, self._device.max_packet_size, apsel)
def write_mem8(self, addr, data, apsel):
self._write_mem(addr, data, Commands.JTAG_WRITEMEM_8BIT, self._device.max_packet_size, apsel)
def _check_dp_bank(self, port, addr):
"""! @brief Check if attempting to access a banked DP register with a firmware version that
doesn't support that.
"""
if ((port == self.DP_PORT) and ((addr & 0xf0) != 0) and not self.supports_banked_dp):
raise exceptions.ProbeError("this STLinkV%d firmware version does not support accessing"
" banked DP registers; please upgrade to the latest STLinkV%d firmware release",
self._hw_version, self._hw_version)
def read_dap_register(self, port, addr):
assert (addr >> 16) == 0, "register address must be 16-bit"
self._check_dp_bank(port, addr)
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_READ_DAP_REG]
cmd.extend(six.iterbytes(struct.pack('<HH', port, addr)))
response = self._device.transfer(cmd, readSize=8)
self._check_status(response[:2])
value, = struct.unpack('<I', response[4:8])
return value
def write_dap_register(self, port, addr, value):
assert (addr >> 16) == 0, "register address must be 16-bit"
self._check_dp_bank(port, addr)
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_WRITE_DAP_REG]
cmd.extend(six.iterbytes(struct.pack('<HHI', port, addr, value)))
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def swo_start(self, baudrate):
with self._lock:
bufferSize = 4096
cmd = [Commands.JTAG_COMMAND, Commands.SWV_START_TRACE_RECEPTION]
cmd.extend(six.iterbytes(struct.pack('<HI', bufferSize, baudrate)))
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def swo_stop(self):
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.SWV_STOP_TRACE_RECEPTION]
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def swo_read(self):
with self._lock:
response = None
bytesAvailable = None
try:
cmd = [Commands.JTAG_COMMAND, Commands.SWV_GET_TRACE_NEW_RECORD_NB]
response = self._device.transfer(cmd, readSize=2)
bytesAvailable, = struct.unpack('<H', response)
if bytesAvailable:
return self._device.read_swv(bytesAvailable)
else:
return bytearray()
except KeyboardInterrupt:
# If we're interrupted after sending the SWV_GET_TRACE_NEW_RECORD_NB command,
# we have to read the queued SWV data before any other commands can be sent.
if response is not None:
if bytesAvailable is None:
bytesAvailable, = struct.unpack('<H', response)
if bytesAvailable:
self._device.read_swv(bytesAvailable)
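

if __name__ == "__main__":
    # Hedged worked example of the GET_VERSION decode described in
    # STLink.get_version(): bfx(value, msb, lsb) extracts bits msb..lsb, which
    # the plain shifts below reproduce. The response word is invented purely
    # for illustration.
    ver = 0x2947
    hw_version = (ver >> 12) & 0xF     # bits [15:12] -> 2
    jtag_version = (ver >> 6) & 0x3F   # bits [11:6]  -> 37
    swim_version = ver & 0x3F          # bits [5:0]   -> 7
    assert (hw_version, jtag_version, swim_version) == (2, 37, 7)
    print("V%dJ%dM%d" % (hw_version, jtag_version, swim_version))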
|
py | 1a3911a2d12c4d8c526260574002c1b1029d767f | import torch
import torch.nn as nn
import torch.nn.functional as F
# from CAPS.effiUnet_v3 import EfficientUNet
from loguru import logger
# from CAPS.effiUnet_v3_1 import EfficientUNet
from CAPS.effiUnet_v4 import EfficientUNet
class CAPSNet(nn.Module):
def __init__(self, args, device):
super(CAPSNet, self).__init__()
self.args = args
self.device = device
self.net = EfficientUNet()
if args.phase == "train":
if not args.magic_pretrain:
raise Exception("args.magic_pretrain should not be none in traing mode")
magic_net_model_dict = torch.load(args.magic_pretrain)
self.net.magic_net.load_state_dict(magic_net_model_dict)
self.net.to(device)
for param in self.net.magic_net.parameters():
param.requires_grad = False
@staticmethod
def normalize(coord, h, w):
'''
turn the coordinates from pixel indices to the range of [-1, 1]
:param coord: [..., 2]
:param h: the image height
:param w: the image width
:return: the normalized coordinates [..., 2]
'''
c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).to(coord.device).float()
coord_norm = (coord - c) / c
return coord_norm
@staticmethod
def denormalize(coord_norm, h, w):
'''
turn the coordinates from normalized value ([-1, 1]) to actual pixel indices
:param coord_norm: [..., 2]
:param h: the image height
:param w: the image width
:return: actual pixel coordinates
'''
c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).to(coord_norm.device)
coord = coord_norm * c + c
return coord
def ind2coord(self, ind, width):
ind = ind.unsqueeze(-1)
x = ind % width
# y = ind // width
y = torch.div(ind, width, rounding_mode='floor')
coord = torch.cat((x, y), -1).float()
return coord
def gen_grid(self, h_min, h_max, w_min, w_max, len_h, len_w):
x, y = torch.meshgrid([torch.linspace(w_min, w_max, len_w), torch.linspace(h_min, h_max, len_h)])
grid = torch.stack((x, y), -1).transpose(0, 1).reshape(-1, 2).float().to(self.device)
return grid
def sample_feat_by_coord(self, x, coord_n, norm=False):
'''
sample from normalized coordinates
:param x: feature map [batch_size, n_dim, h, w]
:param coord_n: normalized coordinates, [batch_size, n_pts, 2]
:param norm: if l2 normalize features
:return: the extracted features, [batch_size, n_pts, n_dim]
'''
feat = F.grid_sample(x, coord_n.unsqueeze(2), align_corners=True).squeeze(-1)
if norm:
feat = F.normalize(feat)
feat = feat.transpose(1, 2)
return feat
def compute_prob(self, feat1, feat2):
'''
compute probability
:param feat1: query features, [batch_size, m, n_dim]
:param feat2: reference features, [batch_size, n, n_dim]
:return: probability, [batch_size, m, n]
'''
assert self.args.prob_from in ['correlation', 'distance']
if self.args.prob_from == 'correlation':
sim = feat1.bmm(feat2.transpose(1, 2))
prob = F.softmax(sim, dim=-1) # Bxmxn
else:
dist = torch.sum(feat1**2, dim=-1, keepdim=True) + \
torch.sum(feat2**2, dim=-1, keepdim=True).transpose(1, 2) - \
2 * feat1.bmm(feat2.transpose(1, 2))
prob = F.softmax(-dist, dim=-1) # Bxmxn
return prob
def get_1nn_coord(self, feat1, featmap2):
'''
find the coordinates of nearest neighbor match
:param feat1: query features, [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the other image
:return: normalized correspondence locations [batch_size, n_pts, 2]
'''
batch_size, d, h, w = featmap2.shape
feat2_flatten = featmap2.reshape(batch_size, d, h*w).transpose(1, 2) # Bx(hw)xd
assert self.args.prob_from in ['correlation', 'distance']
if self.args.prob_from == 'correlation':
sim = feat1.bmm(feat2_flatten.transpose(1, 2))
ind2_1nn = torch.max(sim, dim=-1)[1]
else:
dist = torch.sum(feat1**2, dim=-1, keepdim=True) + \
torch.sum(feat2_flatten**2, dim=-1, keepdim=True).transpose(1, 2) - \
2 * feat1.bmm(feat2_flatten.transpose(1, 2))
ind2_1nn = torch.min(dist, dim=-1)[1]
coord2 = self.ind2coord(ind2_1nn, w)
coord2_n = self.normalize(coord2, h, w)
return coord2_n
def get_expected_correspondence_locs(self, feat1, featmap2, with_std=False):
'''
compute the expected correspondence locations
:param feat1: the feature vectors of query points [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the reference image [batch_size, n_dim, h, w]
:param with_std: if return the standard deviation
:return: the normalized expected correspondence locations [batch_size, n_pts, 2]
'''
B, d, h2, w2 = featmap2.size()
grid_n = self.gen_grid(-1, 1, -1, 1, h2, w2)
featmap2_flatten = featmap2.reshape(B, d, h2*w2).transpose(1, 2) # BX(hw)xd
prob = self.compute_prob(feat1, featmap2_flatten) # Bxnx(hw)
grid_n = grid_n.unsqueeze(0).unsqueeze(0) # 1x1x(hw)x2
expected_coord_n = torch.sum(grid_n * prob.unsqueeze(-1), dim=2) # Bxnx2
if with_std:
# convert to normalized scale [-1, 1]
var = torch.sum(grid_n**2 * prob.unsqueeze(-1), dim=2) - expected_coord_n**2 # Bxnx2
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # Bxn
return expected_coord_n, std
else:
return expected_coord_n
def get_expected_correspondence_within_window(self, feat1, featmap2, coord2_n, with_std=False):
'''
:param feat1: the feature vectors of query points [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the reference image [batch_size, n_dim, h, w]
:param coord2_n: normalized center locations [batch_size, n_pts, 2]
:param with_std: if return the standard deviation
:return: the normalized expected correspondence locations, [batch_size, n_pts, 2], optionally with std
'''
batch_size, n_dim, h2, w2 = featmap2.shape
n_pts = coord2_n.shape[1]
grid_n = self.gen_grid(h_min=-self.args.window_size, h_max=self.args.window_size,
w_min=-self.args.window_size, w_max=self.args.window_size,
len_h=int(self.args.window_size*h2), len_w=int(self.args.window_size*w2))
grid_n_ = grid_n.repeat(batch_size, 1, 1, 1) # Bx1xhwx2
coord2_n_grid = coord2_n.unsqueeze(-2) + grid_n_ # Bxnxhwx2
feat2_win = F.grid_sample(featmap2, coord2_n_grid, padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1) # Bxnxhwxd
feat1 = feat1.unsqueeze(-2)
prob = self.compute_prob(feat1.reshape(batch_size*n_pts, -1, n_dim),
feat2_win.reshape(batch_size*n_pts, -1, n_dim)).reshape(batch_size, n_pts, -1)
expected_coord2_n = torch.sum(coord2_n_grid * prob.unsqueeze(-1), dim=2) # Bxnx2
if with_std:
var = torch.sum(coord2_n_grid**2 * prob.unsqueeze(-1), dim=2) - expected_coord2_n**2 # Bxnx2
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # Bxn
return expected_coord2_n, std
else:
return expected_coord2_n
def forward(self, im1, im2, coord1):
# extract features for both images
# modify the output
# xf1 = self.net(im1)
# xf2 = self.net(im2)
prob_nms1, xf1 = self.net(im1)
prob_nms2, xf2 = self.net(im2)
# image width and height
h1i, w1i = im1.size()[2:]
h2i, w2i = im2.size()[2:]
        # normalize coordinates
coord1_n = self.normalize(coord1, h1i, w1i)
# the center locations of the local window for fine level computation
feat1_fine = self.sample_feat_by_coord(xf1, coord1_n) # Bxnxd
coord2_ef_n, std_2f = self.get_expected_correspondence_locs(feat1_fine, xf2, with_std=True)
feat2_fine = self.sample_feat_by_coord(xf2, coord2_ef_n) # Bxnxd
coord1_ef_n, std_1f = self.get_expected_correspondence_locs(feat2_fine, xf1, with_std=True)
coord2_ef = self.denormalize(coord2_ef_n, h2i, w2i)
coord1_ef = self.denormalize(coord1_ef_n, h1i, w1i)
return {'coord2_ef': coord2_ef, 'coord1_ef': coord1_ef,
'std_1f': std_1f, 'std_2f': std_2f}
def extract_features(self, im, coord):
'''
extract coarse and fine level features given the input image and 2d locations
:param im: [batch_size, 3, h, w]
:param coord: [batch_size, n_pts, 2]
:return: coarse features [batch_size, n_pts, coarse_feat_dim] and fine features [batch_size, n_pts, fine_feat_dim]
'''
xf = self.net(im)
hi, wi = im.size()[2:]
coord_n = self.normalize(coord, hi, wi)
feat_f = self.sample_feat_by_coord(xf, coord_n)
return feat_f
def exetrct_det_and_des(self, im, src_shape):
prob_nms, xf = self.net(im)
# logger.info("im shape: {}".format(im.shape))
# logger.info("prob_nms.shape: {}".format(prob_nms.shape))
# logger.info("xf shape: {}".format(xf.shape))
prob_nms = prob_nms.squeeze(dim=1)
edge_size = 30
prob_nms[:, :edge_size, :] = -1
prob_nms[:, :, :edge_size] = -1
prob_nms[:, src_shape[0] - edge_size:, :] = -1
prob_nms[:, :, src_shape[1] - edge_size:] = -1
# preds = [pred > 0.015 for pred in prob_nms]
points = [torch.stack(torch.where(pred > 0.015)).T for pred in prob_nms]
points = [torch.flip(element, dims=[1]) for element in points]
# logger.info("prob_nms.shape: {}".format(prob_nms.shape))
# logger.info("the first pred shape is : {}".format(preds[0].shape))
# logger.info("len preds is: {}".format(len(preds)))
# logger.info("points len: {}".format(len(points[0])))
# print(points[0])
# print(points[0])
hi, wi = im.size()[2:]
batch_size = im.size()[0]
discriptor = list()
for i in range(batch_size):
coord_n = self.normalize(points[i], hi, wi)
feat_f = self.sample_feat_by_coord(xf[i: i+1], coord_n.unsqueeze(dim=0))
discriptor.append(feat_f)
return points, discriptor
def test(self, im1, im2, coord1):
'''
        given a pair of images im1, im2, compute the correspondences for query points coord1.
        We perform a full image search at coarse level and a local search at fine level
:param im1: [batch_size, 3, h, w]
:param im2: [batch_size, 3, h, w]
:param coord1: [batch_size, n_pts, 2]
:return: the fine level correspondence location [batch_size, n_pts, 2]
'''
xc1, xf1 = self.net(im1)
xc2, xf2 = self.net(im2)
h1i, w1i = im1.shape[2:]
h2i, w2i = im2.shape[2:]
coord1_n = self.normalize(coord1, h1i, w1i)
feat1_c = self.sample_feat_by_coord(xc1, coord1_n)
_, std_c = self.get_expected_correspondence_locs(feat1_c, xc2, with_std=True)
coord2_ec_n = self.get_1nn_coord(feat1_c, xc2)
feat1_f = self.sample_feat_by_coord(xf1, coord1_n)
_, std_f = self.get_expected_correspondence_within_window(feat1_f, xf2, coord2_ec_n, with_std=True)
coord2_ef_n = self.get_1nn_coord(feat1_f, xf2)
coord2_ef = self.denormalize(coord2_ef_n, h2i, w2i)
std = (std_c + std_f)/2
return coord2_ef, std
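

if __name__ == "__main__":
    # Hedged sanity check: normalize()/denormalize() are static methods, so they
    # can be exercised without constructing the full network. The 640x480 image
    # size and corner points below are illustrative only.
    pts = torch.tensor([[[0.0, 0.0], [639.0, 479.0]]])   # image corners
    pts_n = CAPSNet.normalize(pts, 480, 640)             # -> [[-1, -1], [1, 1]]
    pts_back = CAPSNet.denormalize(pts_n, 480, 640)
    assert torch.allclose(pts, pts_back)
    print(pts_n)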
|
py | 1a3912d50e8c62bb220c6e7360ea0bc9d159f180 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CertificateGroupUse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'use': 'FixedReferenceWithRemote'
}
attribute_map = {
'id': 'id',
'name': 'name',
'use': 'use'
}
def __init__(self, id=None, name=None, use=None):
"""
CertificateGroupUse - a model defined in Swagger
"""
self._id = None
self._name = None
self._use = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if use is not None:
self.use = use
@property
def id(self):
"""
Gets the id of this CertificateGroupUse.
A non-modifiable, globally unique ID chosen by the system.
:return: The id of this CertificateGroupUse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this CertificateGroupUse.
A non-modifiable, globally unique ID chosen by the system.
:param id: The id of this CertificateGroupUse.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this CertificateGroupUse.
The name of the object (e.g., a file system or snapshot).
:return: The name of this CertificateGroupUse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CertificateGroupUse.
The name of the object (e.g., a file system or snapshot).
:param name: The name of this CertificateGroupUse.
:type: str
"""
self._name = name
@property
def use(self):
"""
Gets the use of this CertificateGroupUse.
A reference to an object using this certificate group.
:return: The use of this CertificateGroupUse.
:rtype: FixedReferenceWithRemote
"""
return self._use
@use.setter
def use(self, use):
"""
Sets the use of this CertificateGroupUse.
A reference to an object using this certificate group.
:param use: The use of this CertificateGroupUse.
:type: FixedReferenceWithRemote
"""
self._use = use
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CertificateGroupUse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
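

if __name__ == "__main__":
    # Hedged usage sketch: this generated model is a plain value object, so it
    # can be built and serialised without a live FlashBlade. The id/name values
    # below are placeholders.
    group_use = CertificateGroupUse(id='cert-group-use-1', name='posix-cert-group')
    print(group_use.to_str())
    assert group_use == CertificateGroupUse(**group_use.to_dict())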
|
py | 1a3912ed0bfc93425a060aef6a088c88dc9eed18 | # Generated by Django 3.2 on 2021-07-13 18:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cats', '0003_auto_20210711_2127'),
]
operations = [
migrations.AlterField(
model_name='cat',
name='color',
field=models.CharField(choices=[('Gray', 'Серый'), ('Black', 'Чёрный'), ('White', 'Белый'), ('Ginger', 'Рыжий'), ('Mixed', 'Смешанный')], max_length=16),
),
]
|
py | 1a3913caf835dd6dc21ebdb2943ad0fd62ccedd8 | from django.contrib import admin
from .models import Post
from .models import Notice
from .models import News
# Register your models here.
class PostAdmin(admin.ModelAdmin):
fields = ['title', 'auth', 'body']
class NoticeAdmin(admin.ModelAdmin):
fields = ['subject', 'name', 'date', 'body']
class NewsAdmin(admin.ModelAdmin):
fields = ['subject', 'name', 'date', 'body']
admin.site.register(Post, PostAdmin)
admin.site.register(Notice, NoticeAdmin)
admin.site.register(News, NewsAdmin)
|
py | 1a39148b83cbdcc337ef66e2e153a66360d338b5 | import warnings
from collections import defaultdict
from itertools import chain
from typing import Dict, Iterable, Iterator, List, Optional, Set
from math import ceil
from maggma.core import Builder, Store
from maggma.utils import grouper
from monty.json import MontyDecoder
from pymatgen.analysis.phase_diagram import PhaseDiagramError
from pymatgen.entries.compatibility import MaterialsProject2020Compatibility
from pymatgen.entries.computed_entries import ComputedStructureEntry
from emmet.builders.utils import chemsys_permutations
from emmet.core.thermo import ThermoDoc, PhaseDiagramDoc
from emmet.core.utils import jsanitize
class ThermoBuilder(Builder):
def __init__(
self,
materials: Store,
thermo: Store,
phase_diagram: Optional[Store] = None,
oxidation_states: Optional[Store] = None,
query: Optional[Dict] = None,
compatibility=None,
num_phase_diagram_eles: Optional[int] = None,
**kwargs,
):
"""
Calculates thermodynamic quantities for materials from phase
diagram constructions
Args:
materials (Store): Store of materials documents
thermo (Store): Store of thermodynamic data such as formation
energy and decomposition pathway
phase_diagram (Store): Store of phase diagram data for each unique chemical system
oxidation_states (Store): Store of oxidation state data to use in correction scheme application
query (dict): dictionary to limit materials to be analyzed
            compatibility (PymatgenCompatibility): Compatibility module
                to ensure energies are compatible
num_phase_diagram_eles (int): Maximum number of elements to use in phase diagram construction
for data within the separate phase_diagram collection
"""
self.materials = materials
self.thermo = thermo
self.query = query if query else {}
self.compatibility = (
compatibility
if compatibility
else MaterialsProject2020Compatibility("Advanced")
)
self.oxidation_states = oxidation_states
self.phase_diagram = phase_diagram
self.num_phase_diagram_eles = num_phase_diagram_eles
self._completed_tasks: Set[str] = set()
self._entries_cache: Dict[str, List[ComputedStructureEntry]] = defaultdict(list)
sources = [materials]
if oxidation_states is not None:
sources.append(oxidation_states)
targets = [thermo]
if phase_diagram is not None:
targets.append(phase_diagram)
super().__init__(sources=sources, targets=targets, **kwargs)
def ensure_indexes(self):
"""
        Ensures indices on the tasks and materials collections
"""
# Search index for materials
self.materials.ensure_index("material_id")
self.materials.ensure_index("chemsys")
self.materials.ensure_index("last_updated")
# Search index for thermo
self.thermo.ensure_index("material_id")
self.thermo.ensure_index("last_updated")
# Search index for phase_diagram
if self.phase_diagram:
self.phase_diagram.ensure_index("chemsys")
def prechunk(self, number_splits: int) -> Iterable[Dict]: # pragma: no cover
updated_chemsys = self.get_updated_chemsys()
new_chemsys = self.get_new_chemsys()
affected_chemsys = self.get_affected_chemsys(updated_chemsys | new_chemsys)
# Remove overlapping chemical systems
to_process_chemsys = set()
for chemsys in updated_chemsys | new_chemsys | affected_chemsys:
if chemsys not in to_process_chemsys:
to_process_chemsys |= chemsys_permutations(chemsys)
N = ceil(len(to_process_chemsys) / number_splits)
for chemsys_chunk in grouper(to_process_chemsys, N):
yield {"query": {"chemsys": {"$in": list(chemsys_chunk)}}}
def get_items(self) -> Iterator[List[Dict]]:
"""
Gets whole chemical systems of entries to process
"""
self.logger.info("Thermo Builder Started")
self.logger.info("Setting indexes")
self.ensure_indexes()
updated_chemsys = self.get_updated_chemsys()
new_chemsys = self.get_new_chemsys()
affected_chemsys = self.get_affected_chemsys(updated_chemsys | new_chemsys)
# Remove overlapping chemical systems
processed = set()
to_process_chemsys = []
for chemsys in sorted(
updated_chemsys | new_chemsys | affected_chemsys,
key=lambda x: len(x),
reverse=True,
):
if chemsys not in processed:
processed |= chemsys_permutations(chemsys)
to_process_chemsys.append(chemsys)
self.logger.info(
f"Found {len(to_process_chemsys)} chemical systems with new/updated materials to process"
)
self.total = len(to_process_chemsys)
# Yield the chemical systems in order of increasing size
# Will build them in a similar manner to fast Pourbaix
for chemsys in sorted(
to_process_chemsys, key=lambda x: len(x.split("-")), reverse=False
):
entries = self.get_entries(chemsys)
yield entries
def process_item(self, item: List[Dict]):
if len(item) == 0:
return []
entries = [ComputedStructureEntry.from_dict(entry) for entry in item]
# determine chemsys
elements = sorted(
set([el.symbol for e in entries for el in e.composition.elements])
)
chemsys = "-".join(elements)
self.logger.debug(f"Processing {len(entries)} entries for {chemsys}")
material_entries: Dict[str, Dict[str, ComputedStructureEntry]] = defaultdict(
dict
)
pd_entries = []
for entry in entries:
material_entries[entry.entry_id][entry.data["run_type"]] = entry
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Failed to guess oxidation states.*"
)
pd_entries = self.compatibility.process_entries(entries)
self.logger.debug(f"{len(pd_entries)} remain in {chemsys} after filtering")
try:
docs, pd = ThermoDoc.from_entries(pd_entries, deprecated=False)
for doc in docs:
doc.entries = material_entries[doc.material_id]
doc.entry_types = list(material_entries[doc.material_id].keys())
pd_data = None
if self.phase_diagram:
if (
self.num_phase_diagram_eles is None
or len(elements) <= self.num_phase_diagram_eles
):
pd_doc = PhaseDiagramDoc(chemsys=chemsys, phase_diagram=pd)
pd_data = jsanitize(pd_doc.dict(), allow_bson=True)
docs_pd_pair = (
jsanitize([d.dict() for d in docs], allow_bson=True),
[pd_data],
)
except PhaseDiagramError as p:
elsyms = []
for e in entries:
elsyms.extend([el.symbol for el in e.composition.elements])
self.logger.warning(
f"Phase diagram error in chemsys {'-'.join(sorted(set(elsyms)))}: {p}"
)
return []
except Exception as e:
self.logger.error(
f"Got unexpected error while processing {[ent_.entry_id for ent_ in entries]}: {e}"
)
return []
return docs_pd_pair
def update_targets(self, items):
"""
Inserts the thermo and phase diagram docs into the thermo collection
Args:
items ([[tuple(List[dict],List[dict])]]): a list of list of thermo dictionaries to update
"""
# print(len(items))
thermo_docs = [item[0] for item in items]
phase_diagram_docs = [item[1] for item in items]
# flatten out lists
thermo_docs = list(filter(None, chain.from_iterable(thermo_docs)))
phase_diagram_docs = list(filter(None, chain.from_iterable(phase_diagram_docs)))
# Check if already updated this run
thermo_docs = [
i for i in thermo_docs if i["material_id"] not in self._completed_tasks
]
self._completed_tasks |= {i["material_id"] for i in thermo_docs}
for item in thermo_docs:
if isinstance(item["last_updated"], dict):
item["last_updated"] = MontyDecoder().process_decoded(
item["last_updated"]
)
if self.phase_diagram:
self.phase_diagram.update(phase_diagram_docs)
if len(thermo_docs) > 0:
self.logger.info(f"Updating {len(thermo_docs)} thermo documents")
self.thermo.update(docs=thermo_docs, key=["material_id"])
else:
self.logger.info("No thermo items to update")
def get_entries(self, chemsys: str) -> List[Dict]:
"""
        Gets all entries from the materials collection for the corresponding chemical system
        Args:
            chemsys(str): a chemical system represented by string elements separated by a dash (-)
        Returns:
            list(dict): the entry dictionaries for this chemical system
"""
self.logger.info(f"Getting entries for: {chemsys}")
# First check the cache
all_chemsys = chemsys_permutations(chemsys)
cached_chemsys = all_chemsys & set(self._entries_cache.keys())
query_chemsys = all_chemsys - cached_chemsys
all_entries = list(
chain.from_iterable(self._entries_cache[c] for c in cached_chemsys)
)
self.logger.debug(
f"Getting {len(cached_chemsys)} sub-chemsys from cache for {chemsys}"
)
self.logger.debug(
f"Getting {len(query_chemsys)} sub-chemsys from DB for {chemsys}"
)
# Second grab the materials docs
new_q = dict(self.query)
new_q["chemsys"] = {"$in": list(query_chemsys)}
new_q["deprecated"] = False
materials_docs = list(
self.materials.query(
criteria=new_q, properties=["material_id", "entries", "deprecated"]
)
)
# Get Oxidation state data for each material
oxi_states_data = {}
if self.oxidation_states:
material_ids = [t["material_id"] for t in materials_docs]
oxi_states_data = {
d["material_id"]: d.get("average_oxidation_states", {})
for d in self.oxidation_states.query(
properties=["material_id", "average_oxidation_states"],
criteria={
"material_id": {"$in": material_ids},
"state": "successful",
},
)
}
self.logger.debug(
f"Got {len(materials_docs)} entries from DB for {len(query_chemsys)} sub-chemsys for {chemsys}"
)
# Convert the entries into ComputedEntries and store
for doc in materials_docs:
for r_type, entry_dict in doc.get("entries", {}).items():
entry_dict["data"]["oxidation_states"] = oxi_states_data.get(
entry_dict["entry_id"], {}
)
entry_dict["data"]["run_type"] = r_type
elsyms = sorted(set([el for el in entry_dict["composition"]]))
self._entries_cache["-".join(elsyms)].append(entry_dict)
all_entries.append(entry_dict)
self.logger.info(f"Total entries in {chemsys} : {len(all_entries)}")
return all_entries
def get_updated_chemsys(self,) -> Set:
"""Gets updated chemical system as defined by the updating of an existing material"""
updated_mats = self.thermo.newer_in(self.materials, criteria=self.query)
updated_chemsys = set(
self.materials.distinct(
"chemsys", {"material_id": {"$in": list(updated_mats)}, **self.query}
)
)
self.logger.debug(f"Found {len(updated_chemsys)} updated chemical systems")
return updated_chemsys
def get_new_chemsys(self) -> Set:
"""Gets newer chemical system as defined by introduction of a new material"""
# All materials that are not present in the thermo collection
thermo_mat_ids = self.thermo.distinct("material_id")
mat_ids = self.materials.distinct("material_id", self.query)
dif_task_ids = list(set(mat_ids) - set(thermo_mat_ids))
q = {"material_id": {"$in": dif_task_ids}}
new_mat_chemsys = set(self.materials.distinct("chemsys", q))
self.logger.debug(f"Found {len(new_mat_chemsys)} new chemical systems")
return new_mat_chemsys
def get_affected_chemsys(self, chemical_systems: Set) -> Set:
"""Gets chemical systems affected by changes in the supplied chemical systems"""
# First get all chemsys with any of the elements we've marked
affected_chemsys = set()
affected_els = list({el for c in chemical_systems for el in c.split("-")})
possible_affected_chemsys = self.materials.distinct(
"chemsys", {"elements": {"$in": affected_els}}
)
sub_chemsys = defaultdict(list)
# Build a dictionary mapping sub_chemsys to all super_chemsys
for chemsys in possible_affected_chemsys:
for permutation in chemsys_permutations(chemsys):
sub_chemsys[permutation].append(chemsys)
# Select and merge distinct super chemsys from sub_chemsys
for chemsys in chemical_systems:
affected_chemsys |= set(sub_chemsys[chemsys])
self.logger.debug(
f"Found {len(affected_chemsys)} chemical systems affected by this build"
)
return affected_chemsys
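

if __name__ == "__main__":
    # Hedged wiring sketch: how this builder is typically driven. The in-memory
    # stores below are empty placeholders; a real run needs a populated
    # materials collection (and optionally oxidation-state data).
    from maggma.stores import MemoryStore

    materials = MemoryStore(key="material_id")
    thermo = MemoryStore(key="material_id")
    builder = ThermoBuilder(materials=materials, thermo=thermo)
    # builder.run()  # uncomment once the materials store holds documents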
|
py | 1a391493100d669079b6b1c2e52dbaa457676556 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ResetSgPortRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ResetSgPort')
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_OperateType(self):
return self.get_query_params().get('OperateType')
def set_OperateType(self,OperateType):
self.add_query_param('OperateType',OperateType)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp) |
py | 1a3916cd84e17ec2835b6428a09d931c0d2243c9 | import json
import types
import requests
class SMS:
def __init__(self, api_key, api_password):
self.api_key = api_key
self.api_password = api_password
self.url = 'https://api.simpay.pl/sms'
self.headers = {
'X-SIM-KEY': self.api_key,
'X-SIM-PASSWORD': self.api_password
}
# https://docs-new.simpay.pl/python/?python#sms-pobieranie-listy-uslug
    def get_service_list(self):
        result = []
        r = requests.get(self.url, headers=self.headers)
        data = r.json()
        result.extend(data['data'])
        while data['pagination']['links']['next_page'] is not None:
            params = {'page': data['pagination']['current_page'] + 1}
            r = requests.get(self.url, headers=self.headers, params=params)
            data = r.json()
            result.extend(data['data'])
        return result
    def get_service_list_paginated(self, page=None, limit=None):
        params = {}
        if page:
            params['page'] = page
        if limit:
            params['limit'] = limit
        r = requests.get(self.url, headers=self.headers, params=params)
        return r.json()
    # https://docs-new.simpay.pl/python/?python#sms-pobieranie-informacji-o-usludze
    def get_service(self, service_id):
        r = requests.get(self.url + '/' + service_id, headers=self.headers)
        return r.json()['data']
# https://docs-new.simpay.pl/python/?python#sms-pobieranie-listy-transakcji
    def get_transaction_list(self, service_id):
        result = []
        r = requests.get(self.url + '/' + service_id + '/transactions', headers=self.headers)
        data = r.json()
        result.extend(data['data'])
        while data['pagination']['links']['next_page'] is not None:
            params = {'page': data['pagination']['current_page'] + 1}
            r = requests.get(self.url + '/' + service_id + '/transactions', headers=self.headers, params=params)
            data = r.json()
            result.extend(data['data'])
        return result
    def get_transaction_list_paginated(self, service_id, page=None, limit=None):
        params = {}
        if page:
            params['page'] = page
        if limit:
            params['limit'] = limit
        r = requests.get(self.url + '/' + service_id + '/transactions', headers=self.headers, params=params)
        return r.json()
    # https://docs-new.simpay.pl/python/?python#sms-pobieranie-informacji-o-transakcji
    def get_transaction(self, service_id, transaction_id):
        r = requests.get(self.url + '/' + service_id + '/transactions/' + transaction_id, headers=self.headers)
        return r.json()['data']
# https://docs-new.simpay.pl/python/?python#sms-pobieranie-dostepnych-numerow-dla-uslugi
    def get_service_numbers(self, service_id):
        result = []
        r = requests.get(self.url + '/sms/' + service_id + '/numbers', headers=self.headers)
        data = r.json()
        result.extend(data['data'])
        while data['pagination']['links']['next_page'] is not None:
            params = {'page': data['pagination']['current_page'] + 1}
            r = requests.get(self.url + '/sms/' + service_id + '/numbers', headers=self.headers, params=params)
            data = r.json()
            result.extend(data['data'])
        return result
    def get_service_numbers_paginated(self, service_id, page=None, limit=None):
        params = {}
        if page:
            params['page'] = page
        if limit:
            params['limit'] = limit
        r = requests.get(self.url + '/sms/' + service_id + '/numbers', headers=self.headers, params=params)
        return r.json()
    # https://docs-new.simpay.pl/python/?python#sms-informacji-o-pojedynczym-numerze-uslugi
    def get_service_number(self, service_id, number):
        r = requests.get(self.url + '/sms/' + service_id + '/numbers/' + number, headers=self.headers)
        return r.json()['data']
# https://docs-new.simpay.pl/python/?python#sms-pobieranie-wszystkich-dostepnych-numerow
    def get_numbers(self):
        result = []
        r = requests.get(self.url + '/numbers', headers=self.headers)
        data = r.json()
        result.extend(data['data'])
        while data['pagination']['links']['next_page'] is not None:
            params = {'page': data['pagination']['current_page'] + 1}
            r = requests.get(self.url + '/numbers', headers=self.headers, params=params)
            data = r.json()
            result.extend(data['data'])
        return result
    def get_numbers_paginated(self, page=None, limit=None):
        params = {}
        if page:
            params['page'] = page
        if limit:
            params['limit'] = limit
        r = requests.get(self.url + '/numbers', headers=self.headers, params=params)
        return r.json()
    # https://docs-new.simpay.pl/python/?python#sms-pobieranie-pojedynczego-numeru-sms
    def get_number(self, number):
        r = requests.get(self.url + '/numbers/' + number, headers=self.headers)
        return r.json()['data']
# https://docs-new.simpay.pl/python/?python#sms-weryfikacja-poprawnosci-kodu
def verify_sms_code(self, service_id, code, number=None):
        body = {'code': code}
        if number:
            body['number'] = number
        r = requests.post(self.url + '/' + service_id, headers=self.headers, data=json.dumps(body))
        return r.json()['data']
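

if __name__ == "__main__":
    # Hedged usage sketch: the credentials and service id below are
    # placeholders -- substitute the values issued in the SimPay panel.
    client = SMS('YOUR_API_KEY', 'YOUR_API_PASSWORD')
    services = client.get_service_list()
    print(services)
    # transactions = client.get_transaction_list('SERVICE_ID')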
|
py | 1a39172f1ee4c2e6ed3de6f119bd4fc128421537 | import csv
def save_minimal_pairs(output_filename, to_output, write_header=True):
if isinstance(output_filename, str):
outf = open(output_filename, mode='w', encoding='utf-8-sig', newline='')
needs_closed = True
else:
outf = output_filename
needs_closed = False
writer = csv.writer(outf, delimiter='\t')
if write_header:
writer.writerow(['FIRST_SEGMENT', 'SECOND_SEGMENT',
'FIRST_WORD', 'FIRST_WORD_TRANSCRIPTION',
'SECOND_WORD', 'SECOND_WORD_TRANSCRIPTION'])
for _, _, ret_dict in to_output:
for seg_pair, word_pair_set in ret_dict.items():
for word_pair in word_pair_set:
writer.writerow([seg_pair[0], seg_pair[1],
word_pair[0][0], word_pair[0][1],
word_pair[1][0], word_pair[1][1]])
if needs_closed:
outf.close()
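

if __name__ == "__main__":
    # Hedged usage sketch: each result row is (_, _, ret_dict), where ret_dict
    # maps a segment pair to a set of ((word, transcription), (word, transcription))
    # pairs. The toy data below is illustrative only.
    toy_results = [
        (None, None, {('p', 'b'): {(('pat', 'p.ae.t'), ('bat', 'b.ae.t'))}})
    ]
    save_minimal_pairs('minimal_pairs.tsv', toy_results)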
|