ext | sha | content |
---|---|---|
py | b40a8bac9ae3be4384cff522e3651d9d0c3a35ec | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import uuid
import jsonschema
import pytest
from flask import Flask, request, jsonify
from pytest_localserver.http import WSGIServer
from rasa_core import utils
from rasa_core.nlg.callback import (
nlg_request_format_spec,
CallbackNaturalLanguageGenerator)
from rasa_core.utils import EndpointConfig
from rasa_core.agent import Agent
from tests.conftest import DEFAULT_ENDPOINTS_FILE
def nlg_app(base_url="/"):
app = Flask(__name__)
@app.route(base_url, methods=['POST'])
def generate():
"""Simple HTTP NLG generator, checks that the incoming request
is format according to the spec."""
nlg_call = request.json
jsonschema.validate(nlg_call, nlg_request_format_spec())
if nlg_call.get("template") == "utter_greet":
response = {"text": "Hey there!"}
else:
response = {"text": "Sorry, didn't get that."}
return jsonify(response)
return app
@pytest.fixture(scope="module")
def http_nlg(request):
http_server = WSGIServer(application=nlg_app())
http_server.start()
request.addfinalizer(http_server.stop)
return http_server.url
def test_nlg(http_nlg, default_agent_path):
sender = str(uuid.uuid1())
nlg_endpoint = EndpointConfig.from_dict({
"url": http_nlg
})
agent = Agent.load(default_agent_path, None,
generator=nlg_endpoint)
response = agent.handle_message("/greet", sender_id=sender)
assert len(response) == 1
assert response[0] == {"text": "Hey there!", "recipient_id": sender}
def test_nlg_endpoint_config_loading():
cfg = utils.read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "nlg")
assert cfg == EndpointConfig.from_dict({
"url": "http://localhost:5055/nlg"
})
def test_nlg_schema_validation():
content = {"text": "Hey there!"}
assert CallbackNaturalLanguageGenerator.validate_response(content)
def test_nlg_schema_validation_empty_buttons():
content = {"text": "Hey there!", "buttons": []}
assert CallbackNaturalLanguageGenerator.validate_response(content)
def test_nlg_schema_validation_empty_image():
content = {"text": "Hey there!", "image": None}
assert CallbackNaturalLanguageGenerator.validate_response(content)
|
py | b40a8d52d41632df430738b116048d5666b3d1e0 | from collections import deque
class Visitor:
def __init__(self, ast, max_depth=0, breadth_first=False):
self.ast = ast
self.max_depth = max_depth
self.breadth_first = breadth_first
def get_method(self, method_name):
method = getattr(self, method_name, None)
return method if callable(method) else None
def visit(self, ast_node=None):
if ast_node is None:
ast_node = self.ast
method = self.get_method(f"visit_{type(ast_node).__name__}")
if method is not None:
method(ast_node)
else:
self.visit_children(ast_node)
def visit_children(self, ast_node):
for c in ast_node.children():
self.visit(c)
def yield_elements_breadth_first(self, ast_node=None):
nodes_to_visit = deque()
if ast_node is None:
ast_node = self.ast
nodes_to_visit.append(ast_node)
while nodes_to_visit:
next_node = nodes_to_visit.popleft() # nodes_to_visit.pop() for depth-first traversal
yield next_node
for c in next_node.children():
nodes_to_visit.append(c)
def yield_elements(self, ast, depth):
if depth < self.max_depth or self.max_depth == 0:
for child in ast.children():
yield child
yield from self.yield_elements(child, depth + 1)
def __iter__(self):
if self.breadth_first:
yield from self.yield_elements_breadth_first(self.ast)
else:
yield self.ast
yield from self.yield_elements(self.ast, 1)
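# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example: the Node class below only exists to show the
# children() interface the Visitor expects, and to contrast depth-first with
# breadth-first iteration.
class Node:
    def __init__(self, name, children=None):
        self.name = name
        self._children = children or []

    def children(self):
        return self._children


if __name__ == "__main__":
    tree = Node("root", [Node("a", [Node("a1")]), Node("b")])
    # Depth-first (default): root, a, a1, b
    print([n.name for n in Visitor(tree)])
    # Breadth-first: root, a, b, a1
    print([n.name for n in Visitor(tree, breadth_first=True)])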
|
py | b40a8f3aefb4d57faadb8f859dbcf11172a70c34 | from copy import deepcopy
from datetime import datetime
from pystac.errors import STACTypeError
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
TYPE_CHECKING,
Tuple,
TypeVar,
Union,
cast,
)
import dateutil.parser
from dateutil import tz
import pystac
from pystac import STACObjectType, CatalogType
from pystac.asset import Asset
from pystac.catalog import Catalog
from pystac.layout import HrefLayoutStrategy
from pystac.link import Link
from pystac.provider import Provider
from pystac.utils import datetime_to_str
from pystac.serialization import (
identify_stac_object_type,
identify_stac_object,
migrate_to_latest,
)
from pystac.summaries import Summaries
if TYPE_CHECKING:
from pystac.item import Item as Item_Type
from pystac.provider import Provider as Provider_Type
T = TypeVar("T")
class SpatialExtent:
"""Describes the spatial extent of a Collection.
Args:
bboxes : A list of bboxes that represent the spatial
extent of the collection. Each bbox can be 2D or 3D. The length of the bbox
array must be 2*n where n is the number of dimensions. For example, a
2D Collection with only one bbox would be [[xmin, ymin, xmax, ymax]]
extra_fields : Dictionary containing additional top-level fields defined on the
Spatial Extent object.
"""
bboxes: List[List[float]]
"""A list of bboxes that represent the spatial
extent of the collection. Each bbox can be 2D or 3D. The length of the bbox
array must be 2*n where n is the number of dimensions. For example, a
2D Collection with only one bbox would be [[xmin, ymin, xmax, ymax]]"""
extra_fields: Dict[str, Any]
"""Dictionary containing additional top-level fields defined on the Spatial
Extent object."""
def __init__(
self,
bboxes: Union[List[List[float]], List[float]],
extra_fields: Optional[Dict[str, Any]] = None,
) -> None:
# A common mistake is to pass in a single bbox instead of a list of bboxes.
# Account for this by transforming the input in that case.
if isinstance(bboxes, list) and isinstance(bboxes[0], float):
self.bboxes: List[List[float]] = [cast(List[float], bboxes)]
else:
self.bboxes = cast(List[List[float]], bboxes)
self.extra_fields = extra_fields or {}
def to_dict(self) -> Dict[str, Any]:
"""Generate a dictionary representing the JSON of this SpatialExtent.
Returns:
dict: A serialization of the SpatialExtent that can be written out as JSON.
"""
d = {"bbox": self.bboxes, **self.extra_fields}
return d
def clone(self) -> "SpatialExtent":
"""Clones this object.
Returns:
SpatialExtent: The clone of this object.
"""
return SpatialExtent(
bboxes=deepcopy(self.bboxes), extra_fields=deepcopy(self.extra_fields)
)
@staticmethod
def from_dict(d: Dict[str, Any]) -> "SpatialExtent":
"""Constructs a SpatialExtent from a dict.
Returns:
SpatialExtent: The SpatialExtent deserialized from the JSON dict.
"""
return SpatialExtent(
bboxes=d["bbox"], extra_fields={k: v for k, v in d.items() if k != "bbox"}
)
@staticmethod
def from_coordinates(
coordinates: List[Any], extra_fields: Optional[Dict[str, Any]] = None
) -> "SpatialExtent":
"""Constructs a SpatialExtent from a set of coordinates.
This method will only produce a single bbox that covers all points
in the coordinate set.
Args:
coordinates : Coordinates to derive the bbox from.
extra_fields : Dictionary containing additional top-level fields defined on
the SpatialExtent object.
Returns:
SpatialExtent: A SpatialExtent with a single bbox that covers the
given coordinates.
"""
def process_coords(
coord_lists: List[Any],
xmin: Optional[float] = None,
ymin: Optional[float] = None,
xmax: Optional[float] = None,
ymax: Optional[float] = None,
) -> Tuple[Optional[float], Optional[float], Optional[float], Optional[float]]:
for coord in coord_lists:
if isinstance(coord[0], list):
xmin, ymin, xmax, ymax = process_coords(
coord, xmin, ymin, xmax, ymax
)
else:
x, y = coord
if xmin is None or x < xmin:
xmin = x
elif xmax is None or xmax < x:
xmax = x
if ymin is None or y < ymin:
ymin = y
elif ymax is None or ymax < y:
ymax = y
return xmin, ymin, xmax, ymax
xmin, ymin, xmax, ymax = process_coords(coordinates)
if xmin is None or ymin is None or xmax is None or ymax is None:
raise ValueError(
f"Could not determine bounds from coordinate sequence {coordinates}"
)
return SpatialExtent(
bboxes=[[xmin, ymin, xmax, ymax]], extra_fields=extra_fields
)
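# --- Usage sketch (illustrative only, not part of pystac) ---
# Deriving a single covering bbox from GeoJSON-style polygon coordinates with
# SpatialExtent.from_coordinates; the coordinate values below are made up.
#
#   coords = [[[-105.0, 39.0], [-104.0, 39.0], [-104.0, 40.0], [-105.0, 40.0], [-105.0, 39.0]]]
#   extent = SpatialExtent.from_coordinates(coords)
#   extent.to_dict()  # -> {"bbox": [[-105.0, 39.0, -104.0, 40.0]]}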
class TemporalExtent:
"""Describes the temporal extent of a Collection.
Args:
intervals : A list of two datetimes wrapped in a list,
representing the temporal extent of a Collection. Open date ranges are
supported by setting either the start (the first element of the interval)
or the end (the second element of the interval) to None.
extra_fields : Dictionary containing additional top-level fields defined on the
Temporal Extent object.
Note:
Datetimes are required to be in UTC.
"""
intervals: List[List[Optional[datetime]]]
"""A list of two datetimes wrapped in a list,
representing the temporal extent of a Collection. Open date ranges are
represented by either the start (the first element of the interval) or the
end (the second element of the interval) being None."""
extra_fields: Dict[str, Any]
"""Dictionary containing additional top-level fields defined on the Temporal
Extent object."""
def __init__(
self,
intervals: Union[List[List[Optional[datetime]]], List[Optional[datetime]]],
extra_fields: Optional[Dict[str, Any]] = None,
):
# A common mistake is to pass in a single interval instead of a
# list of intervals. Account for this by transforming the input
# in that case.
if isinstance(intervals, list) and isinstance(intervals[0], datetime):
self.intervals = [cast(List[Optional[datetime]], intervals)]
else:
self.intervals = cast(List[List[Optional[datetime]]], intervals)
self.extra_fields = extra_fields or {}
def to_dict(self) -> Dict[str, Any]:
"""Generate a dictionary representing the JSON of this TemporalExtent.
Returns:
dict: A serialization of the TemporalExtent that can be written out as JSON.
"""
encoded_intervals: List[List[Optional[str]]] = []
for i in self.intervals:
start = None
end = None
if i[0] is not None:
start = datetime_to_str(i[0])
if i[1] is not None:
end = datetime_to_str(i[1])
encoded_intervals.append([start, end])
d = {"interval": encoded_intervals, **self.extra_fields}
return d
def clone(self) -> "TemporalExtent":
"""Clones this object.
Returns:
TemporalExtent: The clone of this object.
"""
return TemporalExtent(
intervals=deepcopy(self.intervals), extra_fields=deepcopy(self.extra_fields)
)
@staticmethod
def from_dict(d: Dict[str, Any]) -> "TemporalExtent":
"""Constructs an TemporalExtent from a dict.
Returns:
TemporalExtent: The TemporalExtent deserialized from the JSON dict.
"""
parsed_intervals: List[List[Optional[datetime]]] = []
for i in d["interval"]:
start = None
end = None
if i[0]:
start = dateutil.parser.parse(i[0])
if i[1]:
end = dateutil.parser.parse(i[1])
parsed_intervals.append([start, end])
return TemporalExtent(
intervals=parsed_intervals,
extra_fields={k: v for k, v in d.items() if k != "interval"},
)
@staticmethod
def from_now() -> "TemporalExtent":
"""Constructs an TemporalExtent with a single open interval that has
the start time as the current time.
Returns:
TemporalExtent: The resulting TemporalExtent.
"""
return TemporalExtent(
intervals=[[datetime.utcnow().replace(microsecond=0), None]]
)
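# --- Usage sketch (illustrative only, not part of pystac) ---
# TemporalExtent serializes open-ended ranges with None for the missing bound;
# the example datetime below is made up.
#
#   te = TemporalExtent(intervals=[[datetime(2020, 1, 1, tzinfo=tz.UTC), None]])
#   te.to_dict()  # -> {"interval": [["2020-01-01T00:00:00Z", None]]}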
class Extent:
"""Describes the spatiotemporal extents of a Collection.
Args:
spatial : Potential spatial extent covered by the collection.
temporal : Potential temporal extent covered by the collection.
extra_fields : Dictionary containing additional top-level fields defined on the
Extent object.
"""
spatial: SpatialExtent
"""Potential spatial extent covered by the collection."""
temporal: TemporalExtent
"""Potential temporal extent covered by the collection."""
extra_fields: Dict[str, Any]
"""Dictionary containing additional top-level fields defined on the Extent
object."""
def __init__(
self,
spatial: SpatialExtent,
temporal: TemporalExtent,
extra_fields: Optional[Dict[str, Any]] = None,
):
self.spatial = spatial
self.temporal = temporal
self.extra_fields = extra_fields or {}
def to_dict(self) -> Dict[str, Any]:
"""Generate a dictionary representing the JSON of this Extent.
Returns:
dict: A serialization of the Extent that can be written out as JSON.
"""
d = {
"spatial": self.spatial.to_dict(),
"temporal": self.temporal.to_dict(),
**self.extra_fields,
}
return d
def clone(self) -> "Extent":
"""Clones this object.
Returns:
Extent: The clone of this extent.
"""
return Extent(
spatial=self.spatial.clone(),
temporal=self.temporal.clone(),
extra_fields=deepcopy(self.extra_fields),
)
@staticmethod
def from_dict(d: Dict[str, Any]) -> "Extent":
"""Constructs an Extent from a dict.
Returns:
Extent: The Extent deserialized from the JSON dict.
"""
return Extent(
spatial=SpatialExtent.from_dict(d["spatial"]),
temporal=TemporalExtent.from_dict(d["temporal"]),
extra_fields={
k: v for k, v in d.items() if k not in {"spatial", "temporal"}
},
)
@staticmethod
def from_items(
items: Iterable["Item_Type"], extra_fields: Optional[Dict[str, Any]] = None
) -> "Extent":
"""Create an Extent based on the datetimes and bboxes of a list of items.
Args:
items : A list of items to derive the extent from.
extra_fields : Optional dictionary containing additional top-level fields
defined on the Extent object.
Returns:
Extent: An Extent that spatially and temporally covers all of the
given items.
"""
bounds_values: List[List[float]] = [
[float("inf")],
[float("inf")],
[float("-inf")],
[float("-inf")],
]
datetimes: List[datetime] = []
starts: List[datetime] = []
ends: List[datetime] = []
for item in items:
if item.bbox is not None:
for i in range(0, 4):
bounds_values[i].append(item.bbox[i])
if item.datetime is not None:
datetimes.append(item.datetime)
if item.common_metadata.start_datetime is not None:
starts.append(item.common_metadata.start_datetime)
if item.common_metadata.end_datetime is not None:
ends.append(item.common_metadata.end_datetime)
if not any(datetimes + starts):
start_timestamp = None
else:
start_timestamp = min(
[
dt if dt.tzinfo else dt.replace(tzinfo=tz.UTC)
for dt in datetimes + starts
]
)
if not any(datetimes + ends):
end_timestamp = None
else:
end_timestamp = max(
[
dt if dt.tzinfo else dt.replace(tzinfo=tz.UTC)
for dt in datetimes + ends
]
)
spatial = SpatialExtent(
[
[
min(bounds_values[0]),
min(bounds_values[1]),
max(bounds_values[2]),
max(bounds_values[3]),
]
]
)
temporal = TemporalExtent([[start_timestamp, end_timestamp]])
return Extent(spatial=spatial, temporal=temporal, extra_fields=extra_fields)
class Collection(Catalog):
"""A Collection extends the Catalog spec with additional metadata that helps
enable discovery.
Args:
id : Identifier for the collection. Must be unique within the STAC.
description : Detailed multi-line description to fully explain the
collection. `CommonMark 0.28 syntax <https://commonmark.org/>`_ MAY
be used for rich text representation.
extent : Spatial and temporal extents that describe the bounds of
all items contained within this Collection.
title : Optional short descriptive one-line title for the
collection.
stac_extensions : Optional list of extensions the Collection
implements.
        href : Optional HREF for this collection, which will be set as the
            collection's self link's HREF.
        catalog_type : Optional catalog type for this catalog. Must
            be one of the values in :class:`~pystac.CatalogType`.
license : Collection's license(s) as a
`SPDX License identifier <https://spdx.org/licenses/>`_,
`various`, or `proprietary`. If collection includes
data with multiple different licenses, use `various` and add a link for
each. Defaults to 'proprietary'.
keywords : Optional list of keywords describing the collection.
providers : Optional list of providers of this Collection.
summaries : An optional map of property summaries,
either a set of values or statistics such as a range.
extra_fields : Extra fields that are part of the top-level
JSON properties of the Collection.
"""
assets: Dict[str, Asset]
"""Map of Assets"""
description: str
"""Detailed multi-line description to fully explain the collection."""
extent: Extent
"""Spatial and temporal extents that describe the bounds of all items contained
within this Collection."""
id: str
"""Identifier for the collection."""
stac_extensions: List[str]
"""List of extensions the Collection implements."""
title: Optional[str]
"""Optional short descriptive one-line title for the collection."""
keywords: Optional[List[str]]
"""Optional list of keywords describing the collection."""
providers: Optional[List[Provider]]
"""Optional list of providers of this Collection."""
summaries: Summaries
"""A map of property summaries, either a set of values or statistics such as a
range."""
links: List[Link]
"""A list of :class:`~pystac.Link` objects representing all links associated with
this Collection."""
extra_fields: Dict[str, Any]
"""Extra fields that are part of the top-level JSON properties of the Collection."""
STAC_OBJECT_TYPE = STACObjectType.COLLECTION
DEFAULT_FILE_NAME = "collection.json"
"""Default file name that will be given to this STAC object
in a canonical format."""
def __init__(
self,
id: str,
description: str,
extent: Extent,
title: Optional[str] = None,
stac_extensions: Optional[List[str]] = None,
href: Optional[str] = None,
extra_fields: Optional[Dict[str, Any]] = None,
catalog_type: Optional[CatalogType] = None,
license: str = "proprietary",
keywords: Optional[List[str]] = None,
providers: Optional[List["Provider_Type"]] = None,
summaries: Optional[Summaries] = None,
):
super().__init__(
id,
description,
title,
stac_extensions,
extra_fields,
href,
catalog_type or CatalogType.ABSOLUTE_PUBLISHED,
)
self.extent = extent
self.license = license
self.stac_extensions: List[str] = stac_extensions or []
self.keywords = keywords
self.providers = providers
self.summaries = summaries or Summaries.empty()
self.assets = {}
def __repr__(self) -> str:
return "<Collection id={}>".format(self.id)
def add_item(
self,
item: "Item_Type",
title: Optional[str] = None,
strategy: Optional[HrefLayoutStrategy] = None,
) -> None:
super().add_item(item, title, strategy)
item.set_collection(self)
def to_dict(
self, include_self_link: bool = True, transform_hrefs: bool = True
) -> Dict[str, Any]:
d = super().to_dict(
include_self_link=include_self_link, transform_hrefs=transform_hrefs
)
d["extent"] = self.extent.to_dict()
d["license"] = self.license
if self.stac_extensions is not None:
d["stac_extensions"] = self.stac_extensions
if self.keywords is not None:
d["keywords"] = self.keywords
if self.providers is not None:
d["providers"] = list(map(lambda x: x.to_dict(), self.providers))
if not self.summaries.is_empty():
d["summaries"] = self.summaries.to_dict()
if any(self.assets):
d["assets"] = {k: v.to_dict() for k, v in self.assets.items()}
return d
def clone(self) -> "Collection":
cls = self.__class__
clone = cls(
id=self.id,
description=self.description,
extent=self.extent.clone(),
title=self.title,
stac_extensions=self.stac_extensions,
extra_fields=self.extra_fields,
catalog_type=self.catalog_type,
license=self.license,
keywords=self.keywords,
providers=self.providers,
summaries=self.summaries,
)
clone._resolved_objects.cache(clone)
for link in self.links:
if link.rel == pystac.RelType.ROOT:
# Collection __init__ sets correct root to clone; don't reset
# if the root link points to self
root_is_self = link.is_resolved() and link.target is self
if not root_is_self:
clone.set_root(None)
clone.add_link(link.clone())
else:
clone.add_link(link.clone())
return clone
@classmethod
def from_dict(
cls,
d: Dict[str, Any],
href: Optional[str] = None,
root: Optional[Catalog] = None,
migrate: bool = False,
preserve_dict: bool = True,
) -> "Collection":
if migrate:
info = identify_stac_object(d)
d = migrate_to_latest(d, info)
if not cls.matches_object_type(d):
raise STACTypeError(f"{d} does not represent a {cls.__name__} instance")
catalog_type = CatalogType.determine_type(d)
if preserve_dict:
d = deepcopy(d)
id = d.pop("id")
description = d.pop("description")
license = d.pop("license")
extent = Extent.from_dict(d.pop("extent"))
title = d.get("title")
stac_extensions = d.get("stac_extensions")
keywords = d.get("keywords")
providers = d.get("providers")
if providers is not None:
providers = list(map(lambda x: pystac.Provider.from_dict(x), providers))
summaries = d.get("summaries")
if summaries is not None:
summaries = Summaries(summaries)
assets: Optional[Dict[str, Any]] = d.get("assets", None)
links = d.pop("links")
d.pop("stac_version")
collection = cls(
id=id,
description=description,
extent=extent,
title=title,
stac_extensions=stac_extensions,
extra_fields=d,
license=license,
keywords=keywords,
providers=providers,
summaries=summaries,
href=href,
catalog_type=catalog_type,
)
for link in links:
if link["rel"] == pystac.RelType.ROOT:
# Remove the link that's generated in Catalog's constructor.
collection.remove_links(pystac.RelType.ROOT)
if link["rel"] != pystac.RelType.SELF or href is None:
collection.add_link(Link.from_dict(link))
if assets is not None:
for asset_key, asset_dict in assets.items():
collection.add_asset(asset_key, Asset.from_dict(asset_dict))
if root:
collection.set_root(root)
return collection
def get_assets(self) -> Dict[str, Asset]:
"""Get this item's assets.
Returns:
Dict[str, Asset]: A copy of the dictionary of this item's assets.
"""
return dict(self.assets.items())
def add_asset(self, key: str, asset: Asset) -> None:
"""Adds an Asset to this item.
Args:
key : The unique key of this asset.
asset : The Asset to add.
"""
asset.set_owner(self)
self.assets[key] = asset
def update_extent_from_items(self) -> None:
"""
Update datetime and bbox based on all items to a single bbox and time window.
"""
self.extent = Extent.from_items(self.get_all_items())
def full_copy(
self, root: Optional["Catalog"] = None, parent: Optional["Catalog"] = None
) -> "Collection":
return cast(Collection, super().full_copy(root, parent))
@classmethod
def from_file(
cls, href: str, stac_io: Optional[pystac.StacIO] = None
) -> "Collection":
result = super().from_file(href, stac_io)
if not isinstance(result, Collection):
raise pystac.STACTypeError(f"{result} is not a {Collection}.")
return result
@classmethod
def matches_object_type(cls, d: Dict[str, Any]) -> bool:
return identify_stac_object_type(d) == STACObjectType.COLLECTION
|
py | b40a8faccb9744522bf363cebd7971e50e954c92 | # coding: utf-8
"""
Fulfillment API
Use the Fulfillment API to complete the process of packaging, addressing, handling, and shipping each order on behalf of the seller, in accordance with the payment method and timing specified at checkout. # noqa: E501
OpenAPI spec version: v1.19.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Tax(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'amount': 'Amount',
'tax_type': 'str'
}
attribute_map = {
'amount': 'amount',
'tax_type': 'taxType'
}
def __init__(self, amount=None, tax_type=None): # noqa: E501
"""Tax - a model defined in Swagger""" # noqa: E501
self._amount = None
self._tax_type = None
self.discriminator = None
if amount is not None:
self.amount = amount
if tax_type is not None:
self.tax_type = tax_type
@property
def amount(self):
"""Gets the amount of this Tax. # noqa: E501
:return: The amount of this Tax. # noqa: E501
:rtype: Amount
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this Tax.
:param amount: The amount of this Tax. # noqa: E501
:type: Amount
"""
self._amount = amount
@property
def tax_type(self):
"""Gets the tax_type of this Tax. # noqa: E501
Tax type. This field is only available when <strong>fieldGroups</strong> is set to <code>TAX_BREAKDOWN</code>. If the order has fees, a breakdown of the fees is also provided. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/fulfillment/types/sel:TaxTypeEnum'>eBay API documentation</a> # noqa: E501
:return: The tax_type of this Tax. # noqa: E501
:rtype: str
"""
return self._tax_type
@tax_type.setter
def tax_type(self, tax_type):
"""Sets the tax_type of this Tax.
Tax type. This field is only available when <strong>fieldGroups</strong> is set to <code>TAX_BREAKDOWN</code>. If the order has fees, a breakdown of the fees is also provided. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/fulfillment/types/sel:TaxTypeEnum'>eBay API documentation</a> # noqa: E501
:param tax_type: The tax_type of this Tax. # noqa: E501
:type: str
"""
self._tax_type = tax_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Tax, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Tax):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b40a907d32e5f93dadee381281468fafa381c77f | # ----------------------------------------------------------------------
# MIT License
# Copyright (c) 2018 Lee Min Hua. All rights reserved.
# Author: Lee Min Hua
# E-mail: [email protected]
# This code is originally from Jin-Man Park and Jong-Hwan Kim's Github repository:
# (https://github.com/chickenbestlover/Online-Recurrent-Extreme-Learning-Machine)
# And modified to run Online Recurrent Extreme Learning Machine with Adaptive Forgetting Factor and Genetic Algorithm
# (ORELM-AFF-GA)
# ----------------------------------------------------------------------
"""
Implementation of Online Recurrent Extreme Learning Machine (OR-ELM) with Adaptive Forgetting Factor (AFF)
"""
import numpy as np
from numpy.linalg import pinv
from numpy.linalg import inv
from FOS_ELM import FOSELM
import math
def orthogonalization(Arr):
    [Q, S, _] = np.linalg.svd(Arr)
    tol = max(Arr.shape) * np.spacing(max(S))
    r = np.sum(S > tol)
    Q = Q[:, :r]
    return Q
def linear_recurrent(features, inputW, hiddenW, hiddenA, bias):
(numSamples, numInputs) = features.shape
(numHiddenNeuron, numInputs) = inputW.shape
V = np.dot(features, np.transpose(inputW)) + np.dot(hiddenA, hiddenW)
for i in range(numHiddenNeuron):
V[:, i] += bias[0, i]
return V
def sigmoidActFunc(V):
H = 1 / (1 + np.exp(-V))
return H
class ORELM_AFF(object):
def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction, min_t, max_t, LN=True, AE=True, ORTH=True,
inputWeightForgettingFactor=0.999,
outputWeightForgettingFactor=0.999,
hiddenWeightForgettingFactor=0.999):
self.min_t = min_t
self.max_t = max_t
self.activationFunction = activationFunction
self.inputs = inputs
self.outputs = outputs
self.numHiddenNeurons = numHiddenNeurons
# input to hidden weights
self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
# hidden layer to hidden layer weights
self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
# initial hidden layer activation
self.initial_H = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
self.H = self.initial_H
self.LN = LN
self.AE = AE
self.ORTH = ORTH
# bias of hidden units
self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
self.forgettingFactor = outputWeightForgettingFactor
self.FFmin = 0.9
self.FFmax = 0.999
self.trace = 0
self.thresReset = 0.001
if self.AE:
self.inputAE = FOSELM(inputs=inputs,
outputs=inputs,
numHiddenNeurons=numHiddenNeurons,
activationFunction=activationFunction,
LN=LN,
forgettingFactor=inputWeightForgettingFactor,
ORTH=ORTH
)
self.hiddenAE = FOSELM(inputs=numHiddenNeurons,
outputs=numHiddenNeurons,
numHiddenNeurons=numHiddenNeurons,
activationFunction=activationFunction,
LN=LN,
ORTH=ORTH
)
def layerNormalization(self, H, scaleFactor=1, biasFactor=0):
H_normalized = (H - H.mean()) / (np.sqrt(H.var() + 0.000001))
H_normalized = scaleFactor * H_normalized + biasFactor
return H_normalized
def __calculateInputWeightsUsingAE(self, features):
self.inputAE.train(features=features, targets=features)
return self.inputAE.beta
def __calculateHiddenWeightsUsingAE(self, features):
self.hiddenAE.train(features=features, targets=features)
return self.hiddenAE.beta
def calculateHiddenLayerActivation(self, features):
"""
Calculate activation level of the hidden layer
:param features feature matrix with dimension (numSamples, numInputs)
:return: activation level (numSamples, numHiddenNeurons)
"""
        if self.activationFunction == "sig":
if self.AE:
self.inputWeights = self.__calculateInputWeightsUsingAE(features)
self.hiddenWeights = self.__calculateHiddenWeightsUsingAE(self.H)
V = linear_recurrent(features=features,
inputW=self.inputWeights,
hiddenW=self.hiddenWeights,
hiddenA=self.H,
bias=self.bias)
if self.LN:
V = self.layerNormalization(V)
self.H = sigmoidActFunc(V)
else:
            print(" Unknown activation function type")
raise NotImplementedError
return self.H
def initializePhase(self, lamb=0.0001):
"""
Step 1: Initialization phase
:param features feature matrix with dimension (numSamples, numInputs)
:param targets target matrix with dimension (numSamples, numOutputs)
"""
        if self.activationFunction == "sig":
self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
else:
            print(" Unknown activation function type")
raise NotImplementedError
self.M = inv(lamb * np.eye(self.numHiddenNeurons))
self.beta = np.zeros([self.numHiddenNeurons, self.outputs])
# randomly initialize the input->hidden connections
self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
self.inputWeights = self.inputWeights * 2 - 1
if self.AE:
self.inputAE.initializePhase(lamb=0.00001)
self.hiddenAE.initializePhase(lamb=0.00001)
else:
# randomly initialize the input->hidden connections
self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
self.inputWeights = self.inputWeights * 2 - 1
if self.ORTH:
if self.numHiddenNeurons > self.inputs:
self.inputWeights = orthogonalization(self.inputWeights)
else:
self.inputWeights = orthogonalization(self.inputWeights.transpose())
self.inputWeights = self.inputWeights.transpose()
            # hidden layer to hidden layer weights
self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
self.hiddenWeights = self.hiddenWeights * 2 - 1
if self.ORTH:
self.hiddenWeights = orthogonalization(self.hiddenWeights)
def reset(self):
self.H = self.initial_H
def train(self, features, targets, RESETTING=False):
"""
Step 2: Sequential learning phase
:param features feature matrix with dimension (numSamples, numInputs)
:param targets target matrix with dimension (numSamples, numOutputs)
"""
(numSamples, numOutputs) = targets.shape
assert features.shape[0] == targets.shape[0]
H = self.calculateHiddenLayerActivation(features)
Ht = np.transpose(H)
h = H[0]
ht = np.transpose(h)
target = targets[0]
self.error = np.transpose(target) - np.dot(ht, (self.forgettingFactor) * self.beta)
try:
# update forgetting factor
self.forgettingFactor = self.FFmin + (1 - self.FFmin) * math.exp(-6 * abs((self.error)))
if (self.forgettingFactor < self.FFmin):
self.forgettingFactor = self.FFmin
if (self.forgettingFactor > self.FFmax):
self.forgettingFactor = self.FFmax
scale = 1 / (self.forgettingFactor)
self.M = scale * self.M - np.dot(scale * self.M,
np.dot(Ht, np.dot(
pinv(np.eye(numSamples) + np.dot(H, np.dot(scale * self.M, Ht))),
np.dot(H, scale * self.M))))
self.beta = (self.forgettingFactor) * self.beta + np.dot(self.M, np.dot(Ht, targets - np.dot(H, (
self.forgettingFactor) * self.beta)))
if RESETTING:
beforeTrace = self.trace
self.trace = self.M.trace()
                print(np.abs(beforeTrace - self.trace))
                if np.abs(beforeTrace - self.trace) < self.thresReset:
                    print(self.M)
eig, _ = np.linalg.eig(self.M)
lambMin = min(eig)
lambMax = max(eig)
# lamb = (lambMax+lambMin)/2
lamb = lambMax
lamb = lamb.real
self.M = lamb * np.eye(self.numHiddenNeurons)
                    print("reset")
                    print(self.M)
        except np.linalg.LinAlgError:
            print("SVD did not converge, ignoring the current training cycle")
# else:
# raise RuntimeError
def predict(self, features):
"""
Make prediction with feature matrix
:param features: feature matrix with dimension (numSamples, numInputs)
:return: predictions with dimension (numSamples, numOutputs)
"""
H = self.calculateHiddenLayerActivation(features)
prediction = np.dot(H, self.beta)
return prediction
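# --- Illustrative sketch (not part of the original module) ---
# The adaptive forgetting factor used in ORELM_AFF.train follows
#   ff = FFmin + (1 - FFmin) * exp(-6 * |error|), clipped to [FFmin, FFmax],
# so large prediction errors shrink the factor (faster forgetting) while small
# errors push it toward 1 (longer memory). A standalone version of that rule:
def adaptive_forgetting_factor(error, ff_min=0.9, ff_max=0.999):
    ff = ff_min + (1 - ff_min) * math.exp(-6 * abs(error))
    return min(max(ff, ff_min), ff_max)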
|
py | b40a9182ff597ebe46a6beecefbf34cd053f5521 | from . import io, photometric
from .io import *
from .photometric import *
from .spatial import * |
py | b40a91b906145e146555e962e17708517ade0601 | #!/usr/bin/env python
#ckwg +28
# Copyright 2012-2020 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from kwiver.sprokit.util.test import expect_exception, find_tests, run_test, test_error
# TEST_PROPERTY(WILL_FAIL, TRUE)
def test_return_code():
import sys
sys.exit(1)
# TEST_PROPERTY(WILL_FAIL, TRUE)
def test_error_string():
test_error('an error')
def test_error_string_mid():
import sys
sys.stderr.write('Test')
test_error('an error')
# TEST_PROPERTY(WILL_FAIL, TRUE)
def test_error_string_stdout():
import sys
sys.stdout.write('Error: an error\n')
# TEST_PROPERTY(WILL_FAIL, TRUE)
def test_error_string_second_line():
import sys
sys.stderr.write('Not an error\n')
test_error("an error")
def raise_exception():
raise NotImplementedError
def test_expected_exception():
expect_exception('when throwing an exception', NotImplementedError,
raise_exception)
# TEST_PROPERTY(WILL_FAIL, TRUE)
def test_unexpected_exception():
expect_exception('when throwing an unexpected exception', SyntaxError,
raise_exception)
# TEST_PROPERTY(ENVIRONMENT, TEST_ENVVAR=test_value)
def test_environment():
import os
envvar = 'TEST_ENVVAR'
if envvar not in os.environ:
test_error('failed to get environment from CTest')
else:
expected = 'test_value'
envvalue = os.environ[envvar]
if envvalue != expected:
test_error('did not get expected value')
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
test_error("Expected two arguments")
sys.exit(1)
testname = sys.argv[1]
run_test(testname, find_tests(locals()))
|
py | b40a91bb406b019189a627b62ed5fb1150c12df2 | # qubit number=5
# total number=47
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
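# --- Usage sketch (illustrative only) ---
# build_oracle returns a circuit that flips the phase of every basis state on
# which f evaluates to "1"; for example, marking |11> on two qubits:
#
#   f = lambda rep: str(int(rep == "11"))
#   Zf = build_oracle(2, f)
#
# make_circuit below appends this oracle floor(sqrt(2 ** n) * pi / 4) times.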
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[1],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.z(input_qubit[1]) # number=37
prog.cx(input_qubit[1],input_qubit[0]) # number=38
prog.h(input_qubit[4]) # number=21
prog.cx(input_qubit[0],input_qubit[2]) # number=44
prog.x(input_qubit[2]) # number=45
prog.cx(input_qubit[0],input_qubit[2]) # number=46
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[3],input_qubit[0]) # number=33
prog.z(input_qubit[3]) # number=34
prog.x(input_qubit[4]) # number=40
prog.cx(input_qubit[3],input_qubit[0]) # number=35
prog.x(input_qubit[0]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=29
prog.x(input_qubit[1]) # number=30
prog.cx(input_qubit[0],input_qubit[1]) # number=31
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[0]) # number=25
prog.cx(input_qubit[1],input_qubit[0]) # number=26
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.x(input_qubit[1]) # number=22
prog.y(input_qubit[1]) # number=32
prog.x(input_qubit[1]) # number=23
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1057.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | b40a91fce490fcbb4ca1bc9e4029ea131b4bcd0a | import re
from rest_framework.test import APITestCase
from django.urls import reverse
from django.core import mail
from ..models import User
from .factories import UnactivatedUserFactory, ActivatedUserFactory, ClosedAccountFactory
class UserTests(APITestCase):
def test_create_superuser(self):
admin = User.objects.create_superuser(
email='[email protected]',
password='123qwe',
)
self.assertEqual(admin.is_superuser, True)
self.assertEqual(admin.is_staff, True)
self.assertEqual(admin.check_password('123qwe'), True)
self.assertEqual(str(admin), '[email protected]')
self.assertEqual(admin.get_short_name(), '[email protected]')
admin.first_name = 'Ahmed'
admin.last_name = 'Saad'
admin.save()
self.assertEqual(admin.get_short_name(), 'Ahmed')
self.assertEqual(str(admin), 'Ahmed Saad')
self.assertEqual(admin.get_full_name(), 'Ahmed Saad')
def test_user_registration_flow(self):
# test system has no users
self.assertEqual(User.objects.all().count(), 0)
# test user signup
response = self.client.post(
reverse('auth:register'),
data={
'email': '[email protected]',
'password': '123qwe'
}
)
self.assertEqual(response.status_code, 201)
# test user exists
self.assertEqual(User.objects.all().count(), 1)
# test user can't login
response = self.client.post(
reverse('auth:login'),
data={
'email': '[email protected]',
'password': '123qwe'
}
)
self.assertEqual(response.status_code, 400, response.content)
# Activate user manually until we find a way to test emails
user = User.objects.first()
user.is_active = True
user.save()
# test user can login after activation
response = self.client.post(
reverse('auth:login'),
data={
'email': '[email protected]',
'password': '123qwe'
}
)
self.assertEqual(response.status_code, 200, response.content)
def test_user_can_login_and_see_profile(self):
user = ActivatedUserFactory.create()
response = self.client.post(
reverse('auth:login'),
data={
'email': user.email,
'password': '123qwe'
}
)
self.assertEqual(response.status_code, 200)
token = 'Token ' + response.json()['auth_token']
self.client.credentials(HTTP_AUTHORIZATION=token)
# Now, user should be logged-in
response = self.client.get(
reverse('auth:user'),
)
self.assertEqual(response.status_code, 200)
def test_loggedin_user_can_disable_account(self):
user = ActivatedUserFactory.create()
self.client.login(email=user.email, password='123qwe')
# Now, user should be logged-in
response = self.client.post(
reverse('auth:disable_account')
)
self.assertEqual(response.status_code, 200)
# call it again to make sure user isn't authenticated anymore
response = self.client.post(
reverse('auth:disable_account')
)
self.assertEqual(response.status_code, 401)
def test_user_can_reactivate_account(self):
user = ClosedAccountFactory()
# test user can't login
self.assertFalse(self.client.login(email=user.email, password='123qwe'))
# call login with activate=True
response = self.client.post(
reverse('auth:login'),
data={
'email': user.email,
'password': '123qwe',
'activate': True
}
)
self.assertEqual(response.status_code, 200)
def test_unverified_users_cant_activate_their_accounts_illegally(self):
"""
This is to test that unverified users can't login by setting
activate=True
"""
user = UnactivatedUserFactory()
# call login with activate=True
response = self.client.post(
reverse('auth:login'),
data={
'email': user.email,
'password': '123qwe',
'activate': True
}
)
self.assertEqual(response.status_code, 400)
def test_unverified_users_and_wrong_credentials_and_closed_accounts_get_different_error_messages(self):
unactivated_user = UnactivatedUserFactory()
closed_account = ClosedAccountFactory()
response1 = self.client.post(
reverse('auth:login'),
data={
'email': unactivated_user.email,
'password': '123qwe',
}
).json()
response2 = self.client.post(
reverse('auth:login'),
data={
'email': closed_account.email,
'password': '123qwe',
}
).json()
self.assertNotEqual(response1, response2)
response3 = self.client.post(
reverse('auth:login'),
data={
'email': '[email protected]',
'password': '123qwe',
}
).json()
self.assertNotEqual(response1, response3)
self.assertNotEqual(response2, response3)
def test_verify_mail(self):
self.client.post(
reverse('auth:register'),
data={
'email': '[email protected]',
'password': '123qwe'
}
)
user = User.objects.last()
activation_mail = mail.outbox[0]
pattern = re.compile(r"http:\/\/testdomain.somewhere.com\/auth\/activate\/(?P<uid>[\w_-]*)\/(?P<token>[\w_-]*)")
match = pattern.search(activation_mail.body)
self.assertEqual(user.email_verified, False)
self.assertEqual(user.is_active, False)
response = self.client.post(
path=reverse('auth:activate'),
data=match.groupdict()
)
user.refresh_from_db()
self.assertEqual(response.status_code, 204)
self.assertEqual(user.is_active, True)
self.assertEqual(user.email_verified, True)
|
py | b40a926a696f784ce4088144904dc061478a9a86 | import os
import sys
import hmmpy
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'HMMpy'
copyright = '2021, Christian Stolborg, Mathias Joergensen'
author = 'Christian Stolborg & Mathias Joergensen'
# -- General configuration ---------------------------------------------------
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx_math_dollar'
]
autodoc_default_options = {'members': None, 'inherited-members': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
if html_theme == 'alabaster':
html_theme_options = {
'description':
'Hidden Markov Models for unsupervised learning',
'github_user': 'Cstolborg',
'github_repo': 'HMMpy',
'github_banner': True,
'github_button': False,
'code_font_size': '80%',
}
elif html_theme == 'sphinx_rtd_theme':
html_theme_options = {
'display_version': True,
'collapse_navigation': False,
'sticky_navigation': False,
}
#html_context = {
# 'display_github': True,
# 'github_user': 'Cstolborg',
# 'github_repo': 'hmmpy'
#}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] |
py | b40a92ff8155f3ed07442df19a33aaf1a7952f31 | # Copyright 2011-2016 Josh Kearney
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Log Unit Tests"""
import os
import unittest
from pyhole.core import logger
from pyhole.core import utils
class TestLogger(unittest.TestCase):
def test_logger(self):
test_log_dir = utils.get_home_directory() + "logs/"
try:
# NOTE(jk0): If the configuration file doesn't exist, the config
# class will generate it and raise a SystemExit.
logger.setup_logger("test")
except SystemExit:
logger.setup_logger("test")
test_log = logger.get_logger("TEST")
self.assertEqual("TEST", test_log.name)
self.assertEqual(test_log.level, 0)
os.unlink(test_log_dir + "test.log")
|
py | b40a931011fcfedff1e7597f91d5d0e06e84f044 | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Create visualizations on the weights in each conv and linear layer in a model"""
import os
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
def switch_backend():
"""
switches to the appropriate backend
:return: None
"""
backend = plt.get_backend()
plt.switch_backend(backend)
def has_display():
"""
checks to see if there is a display in the current environment
    :return: True if a display is available in the current environment, False otherwise
"""
return "DISPLAY" in os.environ
def get_weights(conv_module):
"""
Returns the weights of a conv_module in a 2d matrix, where each column is an output channel.
:param conv_module: convNd module
:return: 2d numpy array
"""
axis_0_length = conv_module.weight.shape[0]
axis_1_length = np.prod(conv_module.weight.shape[1:])
reshaped = conv_module.weight.reshape(int(axis_0_length), int(axis_1_length))
weights = reshaped.detach().numpy().T
return weights
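# --- Shape sketch (illustrative only) ---
# For a hypothetical torch.nn.Conv2d(3, 8, kernel_size=3) the weight tensor has
# shape (8, 3, 3, 3); get_weights reshapes it to (8, 27) and transposes it, so
# the returned array has shape (27, 8) with one column per output channel.
#
#   conv = torch.nn.Conv2d(3, 8, 3)
#   get_weights(conv).shape  # -> (27, 8)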
def visualize_module_boxplot(conv_module, name_str, x_stepsize=5, num_y_ticks=20):
"""
Add boxplots for each output channel on tensorboard under images tab.
:param conv_module: convNd module
:param name_str: figure name as will show on tensorboard images tab
:param: x_stepsize: Size of spacing between values on x axis;the distance between two adjacent values on x axis.
:param: num_y_ticks: number of evenly spaced samples, calculated over the interval [y_min, y_max].
:return: None
"""
if not has_display():
return
switch_backend()
plt.clf()
arr = get_weights(conv_module)
fig = plt.figure(figsize=adjust_figure_size(arr.shape[1]))
plt.boxplot(arr)
y_min, y_max = np.ndarray.min(arr), np.ndarray.max(arr)
y_ticks = np.linspace(y_min, y_max, num_y_ticks)
plt.yticks(y_ticks)
x_ticks = np.arange(0, arr.shape[1] + 1, x_stepsize)
plt.xticks(x_ticks, x_ticks)
plt.xlabel("Output Channels")
plt.ylabel("Weight Range")
plt.title("Conv Module Output Channels")
writer = SummaryWriter("./data")
writer.add_figure(name_str, fig, walltime=1)
writer.close()
return
def visualize_module_lineplot(conv_module, name_str):
"""
Create a line plot of tensor minimum and maximum lines overlayed on same plot.
Where x axis is the output channel and the y axis is the min and max values.
:param conv_module: type convNd module
:param name_str: figure name as will show on tensorboard images tab
:return: None
"""
if not has_display():
return
switch_backend()
plt.clf()
df_weights = pd.DataFrame(get_weights(conv_module)).describe()
# fig = plt.figure(figsize=adjust_figure_size(data.shape[1]))
fig = plt.figure(figsize=(15, 10))
    x = list(range(df_weights.shape[1]))
    plt.plot(x, df_weights.loc["max"], label="max")
    plt.plot(x, df_weights.loc["min"], label="min")
    # plt.plot(x, df_weights.loc["25%"], label="25%")
    plt.plot(x, df_weights.loc["50%"], label="median")
    # plt.plot(x, df_weights.loc["75%"], label="75%")
plt.xlabel("Output Channels")
plt.ylabel("Weight Range")
plt.title("Conv Module Output Channels")
plt.legend(loc='upper right')
writer = SummaryWriter("./data")
writer.add_figure(name_str, fig)
writer.close()
def create_histogram(data, name_str):
"""
:param data: python list or numpy array value
:param name_str: figure name as will show on tensorboard images tab
:return: None
"""
if not has_display():
return
switch_backend()
plt.clf()
fig, ax = plt.subplots()
ax.hist(data)
write_to_data(name_str, "", fig)
return
def create_table_from_dataframe(dataframe, name_str):
"""
Show dataframe on tensorboard under images tab with name as name_str.
:param dataframe: pandas dataframe to be shown on tensorboard
:param name_str: figure name as will show on tensorboard images tab
:return: None
"""
if not has_display():
return
switch_backend()
fig = plt.figure(figsize=(20, 20))
fig.patch.set_visible(False)
plt.axis('off')
plt.table(cellText=dataframe.values, colLabels=dataframe.columns, loc='top')
fig.tight_layout()
write_to_data(name_str, "", fig)
def get_necessary_statistics_from_dataframe(module_weights):
"""
Generates descriptive statistics summarizing central tendency, dispersion and shape of output channels distribution.
:param module_weights: module weights represented as a 2d numpy array
:return: None
"""
module_weights_as_dataframe = pd.DataFrame(module_weights)
described_dataframe = module_weights_as_dataframe.describe().drop(index="count")
return described_dataframe
def compare_overall_model_changes_violinplot(before_module_weights, after_module_weights, tab_name, subplot_name):
"""
Creates two violin plots, one for all weight ranges before quantization and one for after.
:param before_module_weights: pandas dataframe of all weights in module before quantization
:param after_module_weights: pandas dataframe of all weights in module after quantization
:param tab_name: The name of the tab in which the subplot will show on tensorboard
:param subplot_name: The name of the subplot under the tab on tensorboard
:return: None
"""
if not has_display():
return
switch_backend()
fig, ax1 = plt.subplots(1, figsize=(8, 5))
before_weights_flattened = np.ndarray.flatten(before_module_weights).tolist()
after_weights_flattened = np.ndarray.flatten(after_module_weights).tolist()
ax1.violinplot([before_weights_flattened, after_weights_flattened], showmeans=True, showextrema=True)
ax1.set_xticks([1, 2])
ax1.set_xticklabels(["before quantization ranges", "after quantization ranges"])
plt.tight_layout()
write_to_data(tab_name, subplot_name, fig)
def compare_key_stats_scatter_plot(before_module_weights_statistics, after_module_weights_statistics, tab_name,
subplot_name):
"""
Plots mean vs standard deviation and min vs max befor and after quantization.
:param before_module_weights_statistics: pandas dataframe of all weights in module before quantization
:param after_module_weights_statistics: pandas dataframe of all weights in module after quantization
:param tab_name: The name of the tab in which the subplot will show on tensorboard
:param subplot_name: The name of the subplot under the tab on tensorboard
:return: None
"""
if not has_display():
return
switch_backend()
plt.clf()
# Returns a tuple containing a figure and axes object(s).
# Unpack this tuple into the variables fig and ax1,ax2
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(12, 8))
# Row 1: scatter plots before quantization
ax1[0].scatter(before_module_weights_statistics.loc['mean'], before_module_weights_statistics.loc['std'], alpha=.3,
color='orange')
ax1[0].set(xlabel='mean weights', ylabel='std weights', title='Before Quantization Mean vs Std')
ax1[1].scatter(before_module_weights_statistics.loc['min'], before_module_weights_statistics.loc['max'], alpha=.3,
color='steelblue')
ax1[1].set(xlabel='min weights', ylabel='max weights', title='Before Quantization Min vs Max')
# Row 2: scatter plots after quantization
ax2[0].scatter(after_module_weights_statistics.loc['mean'], after_module_weights_statistics.loc['std'], alpha=.3,
color='orange')
ax2[0].set(xlabel='mean weights', ylabel='std weights', title='After Quantization Mean vs Std')
ax2[1].scatter(after_module_weights_statistics.loc['min'], after_module_weights_statistics.loc['max'], alpha=.3,
color='steelblue')
ax2[1].set(xlabel='min weights', ylabel='max weights', title='After Quantization Min vs Max')
y_lower_bound = min(np.min(before_module_weights_statistics.loc['max']),
np.min(after_module_weights_statistics.loc['max']))
y_upper_bound = max(np.max(before_module_weights_statistics.loc['max']),
np.max(after_module_weights_statistics.loc['max']))
x_lower_bound = min(np.min(before_module_weights_statistics.loc['min']),
np.min(after_module_weights_statistics.loc['min']))
x_upper_bound = max(np.max(before_module_weights_statistics.loc['min']),
np.max(after_module_weights_statistics.loc['min']))
ax1[1].set_ylim(y_lower_bound - .1, y_upper_bound + .1)
ax1[1].set_xlim(x_lower_bound - .1, x_upper_bound + .1)
ax2[1].set_ylim(y_lower_bound - .1, y_upper_bound + .1)
ax2[1].set_xlim(x_lower_bound - .1, x_upper_bound + .1)
plt.tight_layout()
write_to_data(tab_name, subplot_name, fig)
return
def compare_overall_changes_line_plot(before_module_weights_statistics, after_module_weights_statistics, tab_name,
subplot_name):
"""
Compares the weight ranges before and after quantization of conv2d and linear modules,
given pandas dataframes before and after quantization
    :param before_module_weights_statistics: pandas dataframe of summary statistics of module weights before quantization
    :param after_module_weights_statistics: pandas dataframe of summary statistics of module weights after quantization
:param tab_name: The name of the tab in which the subplot will show on tensorboard
:param subplot_name: The name of the subplot under the tab on tensorboard
:return: None
"""
if not has_display():
return
switch_backend()
fig, ax1 = plt.subplots(1, figsize=(14, 12))
count_col = before_module_weights_statistics.shape[1] # count number of columns
output_channels = list(range(count_col))
ax1.plot(output_channels, before_module_weights_statistics.loc['min'], color='khaki', label="min")
ax1.plot(output_channels, after_module_weights_statistics.loc['min'], color='darkgoldenrod', label="new minimum")
ax1.fill_between(output_channels, before_module_weights_statistics.loc['min'], after_module_weights_statistics.loc['min'],
color='orange', alpha=0.2)
ax1.plot(output_channels, before_module_weights_statistics.loc['mean'], color='steelblue', label="mean")
ax1.plot(output_channels, after_module_weights_statistics.loc['mean'], color='darkcyan', label="new mean")
ax1.fill_between(output_channels, before_module_weights_statistics.loc['mean'], after_module_weights_statistics.loc['mean'],
color='steelblue', alpha=0.2)
ax1.plot(output_channels, before_module_weights_statistics.loc['max'], color='lightgreen', label="max")
ax1.plot(output_channels, after_module_weights_statistics.loc['max'], color='green', label="new max")
ax1.fill_between(output_channels, before_module_weights_statistics.loc['max'], after_module_weights_statistics.loc['max'],
color='green', alpha=0.2)
ax1.legend(loc="upper right")
plt.tight_layout()
write_to_data(tab_name, subplot_name, fig)
def write_to_data(tab_name, subplot_name, fig):
"""
Writes the figure object as an event file in the data directory.
:param tab_name: Name of tab on tensorboard
:param subplot_name: Name of subplot inside tab
:param fig: Figure object on matplotlib
:return: None
"""
tag = tab_name + "/" + subplot_name
if subplot_name == "":
tag = tab_name
writer = SummaryWriter("./data")
writer.add_figure(tag, fig)
writer.close()
def compare_boxplots_before_after_quantization(before_data, after_data, tab_name, subplot_name):
"""
    Compares the weight ranges before and after quantization of conv2d and linear modules,
    given a 2d numpy array of weights before and after quantization.
:param before_data: Before quantization weights numpy array
:param after_data: After quantization weights numpy array
:param tab_name: The name of the tab in which the subplot will show on tensorboard
:param subplot_name: The name of the subplot under the tab on tensorboard
:return: None
"""
if not has_display():
return
switch_backend()
count_col = before_data.shape[1]
fig, (ax1, ax2) = plt.subplots(2, figsize=adjust_figure_size(count_col))
ax1.boxplot(before_data, patch_artist=True)
ax2.boxplot(after_data, patch_artist=True)
xticks = np.arange(0, count_col + 1, 10)
ax1.xaxis.set_ticks(xticks)
ax1.set_xticklabels(xticks)
ax2.xaxis.set_ticks(xticks)
ax2.set_xticklabels(xticks)
ax1.set(xlabel="Output Channels Before Quantization", ylabel="Weight Ranges")
ax2.set(xlabel="Output Channels After Quantization", ylabel="Weight Ranges")
plt.tight_layout()
write_to_data(tab_name, subplot_name, fig)
def map_all_module_weights(model):
"""
Returns a python dictionary mapping each conv2d module and linear module in the input model to its weights.
:param model: pytorch model
    :return: dictionary mapping module names to their weights
"""
module_weights_map = {}
for name, module in model.named_modules():
if isinstance(module, (torch.nn.modules.conv.Conv2d, torch.nn.modules.linear.Linear)):
module_weights = get_weights(module)
module_weights_map[name] = module_weights
return module_weights_map
def adjust_figure_size(num_channels):
"""
Adjusts the figure size of single plot graphs
:param num_channels: Number of channels to be plotted
:return: A tuple, to be passed into figure size so that all channels can be displayed in a plot.
"""
base_length = 15
base_width = 10
num_inches_to_add = num_channels / 60
length = base_length + num_inches_to_add
return length, base_width
def before_after_plots_for_quantized_model(before_weights_map, after_weights_map):
"""
Creates two event files, one for boxplots and another for all other visualization for quantization.
:param before_weights_map: python dictionary where module name is key and values are weights before quantization
    :param after_weights_map: python dictionary where module name is key and values are weights after quantization
:return: None
"""
for key in before_weights_map.keys():
before_quantization_data = before_weights_map[key]
after_quantization_data = after_weights_map[key]
compare_boxplots_before_after_quantization(before_quantization_data, after_quantization_data,
tab_name=key, subplot_name="Boxplots")
before_quantization_as_dataframe = get_necessary_statistics_from_dataframe(before_quantization_data)
after_quantization_as_dataframe = get_necessary_statistics_from_dataframe(after_quantization_data)
compare_overall_model_changes_violinplot(before_quantization_data, after_quantization_data, tab_name=key,
subplot_name="Violin")
compare_overall_changes_line_plot(before_quantization_as_dataframe, after_quantization_as_dataframe,
tab_name=key,
subplot_name="Line")
compare_key_stats_scatter_plot(before_quantization_as_dataframe, after_quantization_as_dataframe, tab_name=key,
subplot_name="Scatter")
def clear_event_files(my_path):
"""
Removes all tensorflow event files in the specified directory
:param my_path: path from current directory
:return: None
"""
for root_dir_file_tuple in os.walk(my_path):
root = root_dir_file_tuple[0]
files = root_dir_file_tuple[2]
for file in files:
            if file.startswith("events.out.tfevents"):
os.remove(os.path.join(root, file))
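# Hedged usage sketch tying the helpers above together. It relies only on this
# module's own imports and functions; the quantization step is stood in by a
# plain deep copy (so the "after" plots will mirror the "before" ones), and the
# tiny example_model exists purely for illustration.
if __name__ == "__main__":
    import copy

    example_model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.Linear(8, 4))
    quantized_model = copy.deepcopy(example_model)  # placeholder for a real quantized copy
    clear_event_files("./data")
    before_map = map_all_module_weights(example_model)
    after_map = map_all_module_weights(quantized_model)
    before_after_plots_for_quantized_model(before_map, after_map)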
|
py | b40a93d14505d03e69d36f44e01fab882db49c3c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tkinter import *
fenetre = Tk()
label = Label(fenetre, text="Hello World")
label.pack()
bouton = Button(fenetre, text="Fermer", command=fenetre.quit)
bouton.pack()
fenetre.mainloop()
|
py | b40a944be2952215f3d4150fa87fd44f2c9f92e0 | # -*- coding: utf-8 -*-
"""Runs the server in uwsgi or http modes.
Also supports starting nginx proxy.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcli
from pykern import pkcollections
from pykern import pkconfig
from pykern import pkio
from pykern import pkjinja
from pykern import pksubprocess
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdexc, pkdp, pkdlog
import contextlib
import os
import py
import re
import signal
import socket
import subprocess
import time
__cfg = None
def flask():
from sirepo import server
import sirepo.pkcli.setup_dev
with pkio.save_chdir(_run_dir()) as r:
sirepo.pkcli.setup_dev.default_command()
# above will throw better assertion, but just in case
assert pkconfig.channel_in('dev')
app = server.init(use_reloader=_cfg().use_reloader, is_server=True)
# avoid WARNING: Do not use the development server in a production environment.
app.env = 'development'
import werkzeug.serving
werkzeug.serving.click = None
app.run(
exclude_patterns=[str(r.join('*'))],
host=_cfg().ip,
port=_cfg().port,
threaded=True,
use_reloader=_cfg().use_reloader,
)
def http():
"""Starts the Flask server and job_supervisor.
Used for development only.
"""
@contextlib.contextmanager
def _handle_signals(signums):
o = [(x, signal.getsignal(x)) for x in signums]
try:
[signal.signal(x[0], _kill) for x in o]
yield
finally:
[signal.signal(x[0], x[1]) for x in o]
def _kill(*args):
for p in processes:
try:
p.terminate()
p.wait(1)
except (ProcessLookupError, ChildProcessError):
continue
except subprocess.TimeoutExpired:
p.kill()
def _start(service):
c = ['pyenv', 'exec', 'sirepo']
c.extend(service)
processes.append(subprocess.Popen(
c,
cwd=str(_run_dir()),
env=e,
))
e = PKDict(os.environ)
e.SIREPO_JOB_DRIVER_MODULES = 'local'
processes = []
with pkio.save_chdir(_run_dir()), \
_handle_signals((signal.SIGINT, signal.SIGTERM)):
try:
_start(['job_supervisor'])
# Avoid race condition on creating auth db
time.sleep(.3)
_start(['service', 'flask'])
p, _ = os.wait()
except ChildProcessError:
pass
finally:
_kill()
def jupyterhub():
import importlib
import sirepo.template
assert pkconfig.channel_in('dev')
sirepo.template.assert_sim_type('jupyterhublogin')
# POSIT: versions same in container-beamsim-jupyter/build.sh
# Order is important: jupyterlab-server should be last so it isn't
# overwritten with a newer version.
for m, v in ('jupyterhub', '1.1.0'), ('jupyterlab', '2.1.0 jupyterlab-server==1.2.0'):
try:
importlib.import_module(m)
except ModuleNotFoundError:
pkcli.command_error(
                '{}: not installed; run `pip install {}=={}`',
m,
m,
v,
)
import sirepo.sim_api.jupyterhublogin
import sirepo.server
sirepo.server.init()
with pkio.save_chdir(_run_dir().join('jupyterhub').ensure(dir=True)) as d:
pksubprocess.check_call_with_signals((
'jupyter',
'serverextension',
'enable',
'--py',
'jupyterlab',
'--sys-prefix',
))
f = d.join('conf.py')
pkjinja.render_resource(
'jupyterhub_conf.py',
PKDict(_cfg()).pkupdate(**sirepo.sim_api.jupyterhublogin.cfg),
output=f,
)
pksubprocess.check_call_with_signals(('jupyterhub', '-f', str(f)))
def nginx_proxy():
"""Starts nginx in container.
Used for development only.
"""
import sirepo.template
assert pkconfig.channel_in('dev')
run_dir = _run_dir().join('nginx_proxy').ensure(dir=True)
with pkio.save_chdir(run_dir) as d:
f = run_dir.join('default.conf')
c = PKDict(_cfg()).pkupdate(run_dir=str(d))
if sirepo.template.is_sim_type('jupyterhublogin'):
import sirepo.sim_api.jupyterhublogin
import sirepo.server
sirepo.server.init()
c.pkupdate(
jupyterhub_root=sirepo.sim_api.jupyterhublogin.cfg.uri_root,
)
pkjinja.render_resource('nginx_proxy.conf', c, output=f)
cmd = [
'nginx',
'-c',
str(f),
]
pksubprocess.check_call_with_signals(cmd)
def uwsgi():
"""Starts UWSGI server"""
run_dir = _run_dir()
with pkio.save_chdir(run_dir):
values = _cfg().copy()
values['logto'] = None if pkconfig.channel_in('dev') else str(run_dir.join('uwsgi.log'))
# uwsgi.py must be first, because values['uwsgi_py'] referenced by uwsgi.yml
for f in ('uwsgi.py', 'uwsgi.yml'):
output = run_dir.join(f)
values[f.replace('.', '_')] = str(output)
pkjinja.render_resource(f, values, output=output)
cmd = ['uwsgi', '--yaml=' + values['uwsgi_yml']]
pksubprocess.check_call_with_signals(cmd)
def _cfg():
global __cfg
if not __cfg:
__cfg = pkconfig.init(
ip=('0.0.0.0', _cfg_ip, 'what IP address to open'),
jupyterhub_port=(8002, _cfg_port, 'port on which jupyterhub listens'),
jupyterhub_debug=(
True,
bool,
'turn on debugging for jupyterhub (hub, spawner, ConfigurableHTTPProxy)',
),
nginx_proxy_port=(8080, _cfg_port, 'port on which nginx_proxy listens'),
port=(8000, _cfg_port, 'port on which uwsgi or http listens'),
processes=(1, _cfg_int(1, 16), 'how many uwsgi processes to start'),
run_dir=(None, str, 'where to run the program (defaults db_dir)'),
# uwsgi got hung up with 1024 threads on a 4 core VM with 4GB
# so limit to 128, which is probably more than enough with
# this application.
threads=(10, _cfg_int(1, 128), 'how many uwsgi threads in each process'),
use_reloader=(pkconfig.channel_in('dev'), bool, 'use the Flask reloader'),
)
return __cfg
def _cfg_emails(value):
"""Parse a list of emails separated by comma, colons, semicolons or spaces.
Args:
value (object): if list or tuple, use verbatim; else split
Returns:
list: validated emails
"""
import pyisemail
try:
if not isinstance(value, (list, tuple)):
value = re.split(r'[,;:\s]+', value)
except Exception:
pkcli.command_error('{}: invalid email list', value)
    for v in value:
        if not pyisemail.is_email(v):
            pkcli.command_error('{}: invalid email', v)
    return value
def _cfg_int(lower, upper):
def wrapper(value):
v = int(value)
assert lower <= v <= upper, \
'value must be from {} to {}'.format(lower, upper)
return v
return wrapper
def _cfg_ip(value):
try:
socket.inet_aton(value)
return value
except socket.error:
pkcli.command_error('{}: ip is not a valid IPv4 address', value)
def _cfg_port(value):
return _cfg_int(5001, 32767)(value)
def _run_dir():
from sirepo import server
import sirepo.srdb
if not isinstance(_cfg().run_dir, type(py.path.local())):
_cfg().run_dir = pkio.mkdir_parent(_cfg().run_dir) if _cfg().run_dir else sirepo.srdb.root()
return _cfg().run_dir
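# Hedged usage sketch: pykern's pkcli exposes the public functions above as
# subcommands of `sirepo service` (the http() helper itself spawns
# `sirepo service flask`), so typical invocations look like:
#
#   sirepo service http          # Flask app + job_supervisor, development only
#   sirepo service uwsgi         # uwsgi server from the rendered uwsgi.yml
#   sirepo service nginx_proxy   # nginx in front of the app, development only
#   sirepo service jupyterhub    # jupyterhub for the jupyterhublogin sim type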
|
py | b40a9496b832bc60d15325794b16dd140a220a01 | from .engine import Engine
class Parallel:
"""
Build a collection of jobs to be executed in parallel
:params: [.abstracts.job.Job] items
"""
def __init__(self, *items):
self.items = items
# Execute synchronous jobs
def execute(self):
return Engine().execute(self.items)
# Dispatches synchronous jobs
def dispatch(self):
return Engine().dispatch(self.items)
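# Hedged usage sketch: SendEmail and ResizeImage are hypothetical Job
# subclasses; any job objects the Engine accepts work the same way.
#
#   jobs = Parallel(SendEmail(user), ResizeImage(path))
#   results = jobs.execute()   # run the jobs and wait for the results
#   jobs.dispatch()            # or hand them to the Engine without waiting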
|
py | b40a952cebcff422ad7b061e2d88ad7e81e53c4a | from random import shuffle
from BaseAI_3 import BaseAI
from Displayer_3 import Displayer
import time
from Grid_3 import directionVectors
import math
# computerAi para
tileVal = [2, 4] # tile value
grid_size = 4
prob = 0.9
class PlayerAI(BaseAI):
def getMove(self, grid):
solver = Solver(heuristics_fun)
return solver.solve(grid)
class Solver():
def __init__(self, estimateFun, max_turn=16, maxTime=0.18):
self.max_dep = max_turn
self.estimateFun = estimateFun
        self.time = time.perf_counter()  # time.clock() was removed in Python 3.8
self.maxTime = maxTime
def solve(self, grid):
m = self.maximize(grid, self.max_dep)[0]
if m is None:
moves = grid.getAvailableMoves()
shuffle(moves)
return moves[0]
return m
def terminal_test(self, actions, dep):
        return dep == 0 or len(actions) == 0 or time.perf_counter() - self.time > self.maxTime
def minimize(self, grid, dep):
# self.dep +=1
cells = grid.getAvailableCells()
if self.terminal_test(cells, dep):
return self.estimateFun(grid)
utility = 0
for cell in cells:
child = grid.clone() # grid is not change
# for val in tileVal:
child.setCellValue(cell, tileVal[0])
u1 = self.maximize(child, dep - 1)[1]
child.setCellValue(cell, tileVal[1])
u2 = self.maximize(child, dep - 1)[1]
utility += prob * u1 + (1 - prob) * u2
return utility / len(cells)
def maximize(self, grid, dep):
# self.dep +=1
moves = grid.getAvailableMoves()
if self.terminal_test(moves, dep):
return (None, self.estimateFun(grid))
max_utility = -1
mov = None
shuffle(moves)
for m in moves:
child = grid.clone()
if not child.move(m):
continue
utility = self.minimize(child, dep - 1)
# print("minimize utility = ", utility)
if utility > max_utility:
max_utility = utility
mov = m
return (mov, max_utility)
# some helper function
# some selected para
max_power = 20
weight = [2.5 ** 5] + [2.5 ** i for i in range(max_power)]
# weight matrix of position
weight_mat = [[13, 9, 6, 4],
[9, 6, 4, 2],
[6, 4, 2, 1],
[4, 2, 1, 0],
]
# estimate function
def heuristics_fun(grid):
return feature2(grid) - penalty(grid) + estimate_score(grid)
def estimate_score(grid, weight=weight):
weight[1] = weight[2] = 0
ret = 0
max_v = 0
for i in range(grid_size):
for j in range(grid_size):
idx = int(math.log2(grid.getCellValue((i, j)) + 0.0000001) + 0.5)
if idx < 0:
idx = 0
if idx > max_v:
max_v = idx
ret += weight[idx]
if idx >= 10:
ret += (1 << idx)*idx/6
ret =ret * idx /5
return ret
def feature2(grid):
ret = 0
for i in range(grid_size):
for j in range(grid_size):
val = grid.getCellValue((i, j))
if val > 4:
ret += weight_mat[i][j] * val
return ret
def penalty(grid):
ret = 0
for i in range(grid_size):
for j in range(grid_size):
cur_pos = (i, j)
val = grid.getCellValue(cur_pos)
for dir in directionVectors:
pos = (cur_pos[0] + dir[0], cur_pos[1] + dir[1])
if grid.crossBound(pos):
continue
neibor_val = grid.getCellValue(pos)
if neibor_val == val:
ret -= val
ret += abs(val - neibor_val)
return ret
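# Hedged usage sketch: the course harness normally drives PlayerAI.getMove(grid)
# through a GameManager, but the expectimax solver can be exercised directly.
# The Grid constructor and cell setup below are assumptions based on Grid_3.
#
#   from Grid_3 import Grid
#   grid = Grid()
#   grid.setCellValue((0, 0), 2)
#   grid.setCellValue((1, 1), 2)
#   move = Solver(heuristics_fun).solve(grid)
#   grid.move(move)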
|
py | b40a9564e828a375513b3b0ae99d91e543b6afc5 | import logging
from utils.logger import Logger
from utils.validation import Validator
from utils.urls import url_domain, port_parse
import requests
import os.path
from os import sep
from base64 import b64decode
from json import loads
class Parse:
"""
Parse the results of the HSTS file
"""
__path_moz = f"dependencies{sep}nsSTSPreloadList.inc"
__path_gog = f"dependencies{sep}transport_security_state_static.json"
def __init__(self, moz=True):
"""
:param moz: True if the mozilla file is to be parsed, False if the google file is to be parsed
:type moz: bool
"""
self.__cache = {}
if moz:
self.__parse_moz(self.__path_moz)
else:
self.__parse_gog(self.__path_gog)
def __parse_moz(self, path):
"""
Parse the Mozilla file
:param path: path to the Mozilla file
:type path: str
"""
if os.path.exists(path):
with open(path, "r") as file:
start_parsing = False
for line in file:
if "%%" in line: # start n stop
start_parsing = True if not start_parsing else False
if start_parsing and "%%" not in line:
if len(line.replace("\n", "").split(",")) == 2:
host, no = line.replace("\n", "").split(",")
self.__cache[host] = no
else:
raise FileNotFoundError("The file provided for mozilla HSTS doesn't exist.")
def __parse_gog(self, path):
"""
Parse the Google file
:param path: path to the Google file
:type path: str
"""
if os.path.exists(path):
with open(path, "r") as file:
raw_results = b64decode(file.read()).decode().split("\n")
gog = loads(
"\n".join(
[
line
for line in raw_results
if not line.lstrip().startswith("//")
]
)
)
for sub in gog["entries"]:
name = sub["name"]
sub.pop("name", None)
self.__cache[name] = sub
else:
raise FileNotFoundError("The file provided for google HSTS doesn't exist.")
def output(self):
"""
Return the results of the parsing
:return: dict results
:rtype: dict
"""
return self.__cache
class Https:
"""
Analyze the results of the request and return the results by choosing the right method asked
"""
HTTPS = 0
HSTSSET = 1
HSTSPRELOAD = 2
SERVERINFO = 3
__cache = {}
__preloaded_moz = {}
__preloaded_gog = {}
__output = {}
__headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/39.0.2171.95 "
"Safari/537.36"
}
def __init__(self):
self.__input_dict = {}
self.__logging = Logger("HTTPS_HSTS")
def input(self, **kwargs):
"""
Set the input parameters
:param kwargs: input parameters
:type kwargs: dict
:Keyword Arguments:
* *hostname* (``str``) -- Hostname to analyze
* *type* (``int``) -- Type of HSTS to analyze
* *port* (``int``) -- Port to analyze
* *force* (``bool``) -- Force the analysis ignoring cache
"""
self.__input_dict = kwargs
def output(self, **kwargs):
"""
Return the results of the analysis
:param kwargs: output parameters
:type kwargs: dict
:Keyword Arguments:
* *hostname* (``str``) -- Hostname to analyze
:return: dict results
:rtype: dict
"""
return (
self.__output[kwargs["hostname"]]
if "hostname" in kwargs and kwargs["hostname"] in self.__cache
else False
)
def run(self, **kwargs):
"""
Run the analysis
:param kwargs: input parameters
:type kwargs: dict
:Keyword Arguments:
* *hostname* (``str``) -- Hostname to analyze
* *type* (``int``) -- Type of HSTS to analyze
* *port* (``int``) -- Port to analyze
* *force* (``bool``) -- Force the analysis ignoring cache
:return: dict results
:rtype: dict
"""
self.input(**kwargs)
if "hostname" not in self.__input_dict:
raise AssertionError("IP or hostname args not found.")
elif "type" not in self.__input_dict:
raise AssertionError("Type args not found.")
else: # initialization of parameters
self.__input_dict["hostname"] = url_domain(self.__input_dict["hostname"])
force = (
self.__input_dict["force"] if "force" in self.__input_dict else False
)
if "port" not in self.__input_dict:
self.__input_dict["port"] = "443"
            # previously also checked: self.__input_dict["type"] != self.HTTPS
            port_to_add = (
                ":" + port_parse(self.__input_dict["port"])
                if self.__input_dict["port"] != "443"
                else ""
            )
Validator(
[
(self.__input_dict["hostname"], str),
(force, bool),
(self.__input_dict["type"], int),
]
)
# request
link = (
f'{"http" if self.__input_dict["type"] == self.HTTPS else "https"}://'
f'{self.__input_dict["hostname"]}'
f"{port_to_add}"
)
self.__output[link] = self.__worker(
link,
self.__input_dict["type"],
force,
)
return self.output(hostname=link)
def __chose_results(self, type: int, response: requests.Response):
"""
Internal method to choose the right results
:param type: type of HSTS
:type type: int
:param response: response of the request
:type response: requests.Response
:return: dict results if not hsts analysis, else a boolean if hsts is preloaded
:rtype: dict or bool
"""
self.__logging.debug(response.headers)
if type == self.HTTPS:
return (
response.is_redirect or response.is_permanent_redirect
) and response.headers["location"].startswith("https")
elif type == self.SERVERINFO:
return response.headers["server"] if "server" in response.headers else ""
elif type == self.HSTSSET:
return "strict-transport-security" in response.headers
else:
if not self.__preloaded_moz:
self.__logging.debug("Preloading mozilla hsts..")
self.__preloaded_moz = Parse().output()
if not self.__preloaded_gog:
self.__logging.debug("Preloading google hsts..")
self.__preloaded_gog = Parse(moz=False).output()
if response.request:
parsed_url = url_domain(response.request.url)
self.__logging.debug(f"url : {parsed_url} parsed")
else:
parsed_url = None
return (
parsed_url in self.__preloaded_moz or parsed_url in self.__preloaded_gog
)
def __worker(self, link: str, type: int, force: bool):
"""
Internal method to run the analysis
:param link: link to analyze
:type link: str
:param type: type of HSTS
:type type: int
:param force: force the analysis ignoring cache
:type force: bool
:return: dict results
:rtype: dict
"""
if force:
try:
self.__cache[link] = requests.head(
link, headers=self.__headers, timeout=5
)
except requests.exceptions.SSLError as ex:
self.__logging.error(f"I can't connect to SSL/TLS:\n{ex}")
self.__logging.warning(
"The HTTPS_HSTS analysis cannot proceed and result will be set as vulnerable."
)
return self.__chose_results(
type, requests.Response()
) # default response
            except (
                requests.exceptions.ConnectTimeout,
                requests.exceptions.ConnectionError,
            ) as ex:
self.__logging.error(f"I can't connect to host:\n{ex}")
self.__logging.warning(
"The HTTPS_HSTS analysis cannot proceed and result will be set as vulnerable."
)
return self.__chose_results(
type, requests.Response()
) # default response
else:
if link not in self.__cache:
self.__worker(link, type, force=True)
response = self.__cache[link]
if response.ok:
return self.__chose_results(type, response)
else:
self.__logging.warning(
f"Received Status Code {response.status_code}, result could not make sense."
)
return self.__chose_results(type, response)
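# Hedged usage sketch: requires network access, and the HSTSPRELOAD check also
# needs the preload files under dependencies/; the hostname is illustrative.
if __name__ == "__main__":
    analyzer = Https()
    print(analyzer.run(hostname="https://example.com", type=Https.HSTSSET, force=True))
    print(analyzer.run(hostname="https://example.com", type=Https.HTTPS, force=True))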
|
py | b40a959c957de0c70b0d54c530f04e75b6d558e2 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RegionBackendServiceArgs', 'RegionBackendService']
@pulumi.input_type
class RegionBackendServiceArgs:
def __init__(__self__, *,
region: pulumi.Input[str],
affinity_cookie_ttl_sec: Optional[pulumi.Input[int]] = None,
backends: Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]] = None,
cdn_policy: Optional[pulumi.Input['BackendServiceCdnPolicyArgs']] = None,
circuit_breakers: Optional[pulumi.Input['CircuitBreakersArgs']] = None,
compression_mode: Optional[pulumi.Input['RegionBackendServiceCompressionMode']] = None,
connection_draining: Optional[pulumi.Input['ConnectionDrainingArgs']] = None,
connection_tracking_policy: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyArgs']] = None,
consistent_hash: Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsArgs']] = None,
custom_request_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
custom_response_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enable_cdn: Optional[pulumi.Input[bool]] = None,
failover_policy: Optional[pulumi.Input['BackendServiceFailoverPolicyArgs']] = None,
health_checks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
iap: Optional[pulumi.Input['BackendServiceIAPArgs']] = None,
load_balancing_scheme: Optional[pulumi.Input['RegionBackendServiceLoadBalancingScheme']] = None,
locality_lb_policy: Optional[pulumi.Input['RegionBackendServiceLocalityLbPolicy']] = None,
log_config: Optional[pulumi.Input['BackendServiceLogConfigArgs']] = None,
max_stream_duration: Optional[pulumi.Input['DurationArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
outlier_detection: Optional[pulumi.Input['OutlierDetectionArgs']] = None,
port_name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input['RegionBackendServiceProtocol']] = None,
request_id: Optional[pulumi.Input[str]] = None,
security_settings: Optional[pulumi.Input['SecuritySettingsArgs']] = None,
service_bindings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
session_affinity: Optional[pulumi.Input['RegionBackendServiceSessionAffinity']] = None,
subsetting: Optional[pulumi.Input['SubsettingArgs']] = None,
timeout_sec: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a RegionBackendService resource.
:param pulumi.Input[int] affinity_cookie_ttl_sec: Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
:param pulumi.Input[Sequence[pulumi.Input['BackendArgs']]] backends: The list of backends that serve this BackendService.
:param pulumi.Input['BackendServiceCdnPolicyArgs'] cdn_policy: Cloud CDN configuration for this BackendService. Only available for specified load balancer types.
:param pulumi.Input['RegionBackendServiceCompressionMode'] compression_mode: Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
:param pulumi.Input['BackendServiceConnectionTrackingPolicyArgs'] connection_tracking_policy: Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing.
:param pulumi.Input['ConsistentHashLoadBalancerSettingsArgs'] consistent_hash: Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_request_headers: Headers that the load balancer adds to proxied requests. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_response_headers: Headers that the load balancer adds to proxied responses. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
:param pulumi.Input[bool] enable_cdn: If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.
:param pulumi.Input['BackendServiceFailoverPolicyArgs'] failover_policy: Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).
:param pulumi.Input[Sequence[pulumi.Input[str]]] health_checks: The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.
:param pulumi.Input['BackendServiceIAPArgs'] iap: The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing.
:param pulumi.Input['RegionBackendServiceLoadBalancingScheme'] load_balancing_scheme: Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer.
:param pulumi.Input['RegionBackendServiceLocalityLbPolicy'] locality_lb_policy: The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
:param pulumi.Input['BackendServiceLogConfigArgs'] log_config: This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver.
:param pulumi.Input['DurationArgs'] max_stream_duration: Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[str] network: The URL of the network to which this backend service belongs. This field can only be specified when the load balancing scheme is set to INTERNAL.
:param pulumi.Input['OutlierDetectionArgs'] outlier_detection: Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
:param pulumi.Input[str] port_name: A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.
:param pulumi.Input['RegionBackendServiceProtocol'] protocol: The protocol this BackendService uses to communicate with backends. Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC. depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancers or for Traffic Director for more information. Must be set to GRPC when the backend service is referenced by a URL map that is bound to target gRPC proxy.
:param pulumi.Input['SecuritySettingsArgs'] security_settings: This field specifies the security policy that applies to this backend service. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
:param pulumi.Input[Sequence[pulumi.Input[str]]] service_bindings: URLs of networkservices.ServiceBinding resources. Can only be set if load balancing scheme is INTERNAL_SELF_MANAGED. If set, lists of backends and health checks must be both empty.
:param pulumi.Input['RegionBackendServiceSessionAffinity'] session_affinity: Type of session affinity to use. The default is NONE. Only NONE and HEADER_FIELD are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. For more details, see: [Session Affinity](https://cloud.google.com/load-balancing/docs/backend-service#session_affinity).
:param pulumi.Input[int] timeout_sec: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Instead, use maxStreamDuration.
"""
pulumi.set(__self__, "region", region)
if affinity_cookie_ttl_sec is not None:
pulumi.set(__self__, "affinity_cookie_ttl_sec", affinity_cookie_ttl_sec)
if backends is not None:
pulumi.set(__self__, "backends", backends)
if cdn_policy is not None:
pulumi.set(__self__, "cdn_policy", cdn_policy)
if circuit_breakers is not None:
pulumi.set(__self__, "circuit_breakers", circuit_breakers)
if compression_mode is not None:
pulumi.set(__self__, "compression_mode", compression_mode)
if connection_draining is not None:
pulumi.set(__self__, "connection_draining", connection_draining)
if connection_tracking_policy is not None:
pulumi.set(__self__, "connection_tracking_policy", connection_tracking_policy)
if consistent_hash is not None:
pulumi.set(__self__, "consistent_hash", consistent_hash)
if custom_request_headers is not None:
pulumi.set(__self__, "custom_request_headers", custom_request_headers)
if custom_response_headers is not None:
pulumi.set(__self__, "custom_response_headers", custom_response_headers)
if description is not None:
pulumi.set(__self__, "description", description)
if enable_cdn is not None:
pulumi.set(__self__, "enable_cdn", enable_cdn)
if failover_policy is not None:
pulumi.set(__self__, "failover_policy", failover_policy)
if health_checks is not None:
pulumi.set(__self__, "health_checks", health_checks)
if iap is not None:
pulumi.set(__self__, "iap", iap)
if load_balancing_scheme is not None:
pulumi.set(__self__, "load_balancing_scheme", load_balancing_scheme)
if locality_lb_policy is not None:
pulumi.set(__self__, "locality_lb_policy", locality_lb_policy)
if log_config is not None:
pulumi.set(__self__, "log_config", log_config)
if max_stream_duration is not None:
pulumi.set(__self__, "max_stream_duration", max_stream_duration)
if name is not None:
pulumi.set(__self__, "name", name)
if network is not None:
pulumi.set(__self__, "network", network)
if outlier_detection is not None:
pulumi.set(__self__, "outlier_detection", outlier_detection)
if port_name is not None:
pulumi.set(__self__, "port_name", port_name)
if project is not None:
pulumi.set(__self__, "project", project)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if security_settings is not None:
pulumi.set(__self__, "security_settings", security_settings)
if service_bindings is not None:
pulumi.set(__self__, "service_bindings", service_bindings)
if session_affinity is not None:
pulumi.set(__self__, "session_affinity", session_affinity)
if subsetting is not None:
pulumi.set(__self__, "subsetting", subsetting)
if timeout_sec is not None:
pulumi.set(__self__, "timeout_sec", timeout_sec)
@property
@pulumi.getter
def region(self) -> pulumi.Input[str]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: pulumi.Input[str]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="affinityCookieTtlSec")
def affinity_cookie_ttl_sec(self) -> Optional[pulumi.Input[int]]:
"""
Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
"""
return pulumi.get(self, "affinity_cookie_ttl_sec")
@affinity_cookie_ttl_sec.setter
def affinity_cookie_ttl_sec(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "affinity_cookie_ttl_sec", value)
@property
@pulumi.getter
def backends(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]]:
"""
The list of backends that serve this BackendService.
"""
return pulumi.get(self, "backends")
@backends.setter
def backends(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]]):
pulumi.set(self, "backends", value)
@property
@pulumi.getter(name="cdnPolicy")
def cdn_policy(self) -> Optional[pulumi.Input['BackendServiceCdnPolicyArgs']]:
"""
Cloud CDN configuration for this BackendService. Only available for specified load balancer types.
"""
return pulumi.get(self, "cdn_policy")
@cdn_policy.setter
def cdn_policy(self, value: Optional[pulumi.Input['BackendServiceCdnPolicyArgs']]):
pulumi.set(self, "cdn_policy", value)
@property
@pulumi.getter(name="circuitBreakers")
def circuit_breakers(self) -> Optional[pulumi.Input['CircuitBreakersArgs']]:
return pulumi.get(self, "circuit_breakers")
@circuit_breakers.setter
def circuit_breakers(self, value: Optional[pulumi.Input['CircuitBreakersArgs']]):
pulumi.set(self, "circuit_breakers", value)
@property
@pulumi.getter(name="compressionMode")
def compression_mode(self) -> Optional[pulumi.Input['RegionBackendServiceCompressionMode']]:
"""
Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
"""
return pulumi.get(self, "compression_mode")
@compression_mode.setter
def compression_mode(self, value: Optional[pulumi.Input['RegionBackendServiceCompressionMode']]):
pulumi.set(self, "compression_mode", value)
@property
@pulumi.getter(name="connectionDraining")
def connection_draining(self) -> Optional[pulumi.Input['ConnectionDrainingArgs']]:
return pulumi.get(self, "connection_draining")
@connection_draining.setter
def connection_draining(self, value: Optional[pulumi.Input['ConnectionDrainingArgs']]):
pulumi.set(self, "connection_draining", value)
@property
@pulumi.getter(name="connectionTrackingPolicy")
def connection_tracking_policy(self) -> Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyArgs']]:
"""
Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing.
"""
return pulumi.get(self, "connection_tracking_policy")
@connection_tracking_policy.setter
def connection_tracking_policy(self, value: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyArgs']]):
pulumi.set(self, "connection_tracking_policy", value)
@property
@pulumi.getter(name="consistentHash")
def consistent_hash(self) -> Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsArgs']]:
"""
Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
"""
return pulumi.get(self, "consistent_hash")
@consistent_hash.setter
def consistent_hash(self, value: Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsArgs']]):
pulumi.set(self, "consistent_hash", value)
@property
@pulumi.getter(name="customRequestHeaders")
def custom_request_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Headers that the load balancer adds to proxied requests. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
"""
return pulumi.get(self, "custom_request_headers")
@custom_request_headers.setter
def custom_request_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_request_headers", value)
@property
@pulumi.getter(name="customResponseHeaders")
def custom_response_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Headers that the load balancer adds to proxied responses. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
"""
return pulumi.get(self, "custom_response_headers")
@custom_response_headers.setter
def custom_response_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_response_headers", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="enableCDN")
def enable_cdn(self) -> Optional[pulumi.Input[bool]]:
"""
If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.
"""
return pulumi.get(self, "enable_cdn")
@enable_cdn.setter
def enable_cdn(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_cdn", value)
@property
@pulumi.getter(name="failoverPolicy")
def failover_policy(self) -> Optional[pulumi.Input['BackendServiceFailoverPolicyArgs']]:
"""
Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).
"""
return pulumi.get(self, "failover_policy")
@failover_policy.setter
def failover_policy(self, value: Optional[pulumi.Input['BackendServiceFailoverPolicyArgs']]):
pulumi.set(self, "failover_policy", value)
@property
@pulumi.getter(name="healthChecks")
def health_checks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.
"""
return pulumi.get(self, "health_checks")
@health_checks.setter
def health_checks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "health_checks", value)
@property
@pulumi.getter
def iap(self) -> Optional[pulumi.Input['BackendServiceIAPArgs']]:
"""
The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing.
"""
return pulumi.get(self, "iap")
@iap.setter
def iap(self, value: Optional[pulumi.Input['BackendServiceIAPArgs']]):
pulumi.set(self, "iap", value)
@property
@pulumi.getter(name="loadBalancingScheme")
def load_balancing_scheme(self) -> Optional[pulumi.Input['RegionBackendServiceLoadBalancingScheme']]:
"""
Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer.
"""
return pulumi.get(self, "load_balancing_scheme")
@load_balancing_scheme.setter
def load_balancing_scheme(self, value: Optional[pulumi.Input['RegionBackendServiceLoadBalancingScheme']]):
pulumi.set(self, "load_balancing_scheme", value)
@property
@pulumi.getter(name="localityLbPolicy")
def locality_lb_policy(self) -> Optional[pulumi.Input['RegionBackendServiceLocalityLbPolicy']]:
"""
The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
"""
return pulumi.get(self, "locality_lb_policy")
@locality_lb_policy.setter
def locality_lb_policy(self, value: Optional[pulumi.Input['RegionBackendServiceLocalityLbPolicy']]):
pulumi.set(self, "locality_lb_policy", value)
@property
@pulumi.getter(name="logConfig")
def log_config(self) -> Optional[pulumi.Input['BackendServiceLogConfigArgs']]:
"""
This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver.
"""
return pulumi.get(self, "log_config")
@log_config.setter
def log_config(self, value: Optional[pulumi.Input['BackendServiceLogConfigArgs']]):
pulumi.set(self, "log_config", value)
@property
@pulumi.getter(name="maxStreamDuration")
def max_stream_duration(self) -> Optional[pulumi.Input['DurationArgs']]:
"""
Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED.
"""
return pulumi.get(self, "max_stream_duration")
@max_stream_duration.setter
def max_stream_duration(self, value: Optional[pulumi.Input['DurationArgs']]):
pulumi.set(self, "max_stream_duration", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the network to which this backend service belongs. This field can only be specified when the load balancing scheme is set to INTERNAL.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter(name="outlierDetection")
def outlier_detection(self) -> Optional[pulumi.Input['OutlierDetectionArgs']]:
"""
Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
"""
return pulumi.get(self, "outlier_detection")
@outlier_detection.setter
def outlier_detection(self, value: Optional[pulumi.Input['OutlierDetectionArgs']]):
pulumi.set(self, "outlier_detection", value)
@property
@pulumi.getter(name="portName")
def port_name(self) -> Optional[pulumi.Input[str]]:
"""
A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.
"""
return pulumi.get(self, "port_name")
@port_name.setter
def port_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port_name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input['RegionBackendServiceProtocol']]:
"""
The protocol this BackendService uses to communicate with backends. Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC. depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancers or for Traffic Director for more information. Must be set to GRPC when the backend service is referenced by a URL map that is bound to target gRPC proxy.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input['RegionBackendServiceProtocol']]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
@property
@pulumi.getter(name="securitySettings")
def security_settings(self) -> Optional[pulumi.Input['SecuritySettingsArgs']]:
"""
This field specifies the security policy that applies to this backend service. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
"""
return pulumi.get(self, "security_settings")
@security_settings.setter
def security_settings(self, value: Optional[pulumi.Input['SecuritySettingsArgs']]):
pulumi.set(self, "security_settings", value)
@property
@pulumi.getter(name="serviceBindings")
def service_bindings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
URLs of networkservices.ServiceBinding resources. Can only be set if load balancing scheme is INTERNAL_SELF_MANAGED. If set, lists of backends and health checks must be both empty.
"""
return pulumi.get(self, "service_bindings")
@service_bindings.setter
def service_bindings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "service_bindings", value)
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> Optional[pulumi.Input['RegionBackendServiceSessionAffinity']]:
"""
Type of session affinity to use. The default is NONE. Only NONE and HEADER_FIELD are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. For more details, see: [Session Affinity](https://cloud.google.com/load-balancing/docs/backend-service#session_affinity).
"""
return pulumi.get(self, "session_affinity")
@session_affinity.setter
def session_affinity(self, value: Optional[pulumi.Input['RegionBackendServiceSessionAffinity']]):
pulumi.set(self, "session_affinity", value)
@property
@pulumi.getter
def subsetting(self) -> Optional[pulumi.Input['SubsettingArgs']]:
return pulumi.get(self, "subsetting")
@subsetting.setter
def subsetting(self, value: Optional[pulumi.Input['SubsettingArgs']]):
pulumi.set(self, "subsetting", value)
@property
@pulumi.getter(name="timeoutSec")
def timeout_sec(self) -> Optional[pulumi.Input[int]]:
"""
Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Instead, use maxStreamDuration.
"""
return pulumi.get(self, "timeout_sec")
@timeout_sec.setter
def timeout_sec(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_sec", value)
class RegionBackendService(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
affinity_cookie_ttl_sec: Optional[pulumi.Input[int]] = None,
backends: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendArgs']]]]] = None,
cdn_policy: Optional[pulumi.Input[pulumi.InputType['BackendServiceCdnPolicyArgs']]] = None,
circuit_breakers: Optional[pulumi.Input[pulumi.InputType['CircuitBreakersArgs']]] = None,
compression_mode: Optional[pulumi.Input['RegionBackendServiceCompressionMode']] = None,
connection_draining: Optional[pulumi.Input[pulumi.InputType['ConnectionDrainingArgs']]] = None,
connection_tracking_policy: Optional[pulumi.Input[pulumi.InputType['BackendServiceConnectionTrackingPolicyArgs']]] = None,
consistent_hash: Optional[pulumi.Input[pulumi.InputType['ConsistentHashLoadBalancerSettingsArgs']]] = None,
custom_request_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
custom_response_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enable_cdn: Optional[pulumi.Input[bool]] = None,
failover_policy: Optional[pulumi.Input[pulumi.InputType['BackendServiceFailoverPolicyArgs']]] = None,
health_checks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
iap: Optional[pulumi.Input[pulumi.InputType['BackendServiceIAPArgs']]] = None,
load_balancing_scheme: Optional[pulumi.Input['RegionBackendServiceLoadBalancingScheme']] = None,
locality_lb_policy: Optional[pulumi.Input['RegionBackendServiceLocalityLbPolicy']] = None,
log_config: Optional[pulumi.Input[pulumi.InputType['BackendServiceLogConfigArgs']]] = None,
max_stream_duration: Optional[pulumi.Input[pulumi.InputType['DurationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
outlier_detection: Optional[pulumi.Input[pulumi.InputType['OutlierDetectionArgs']]] = None,
port_name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input['RegionBackendServiceProtocol']] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
security_settings: Optional[pulumi.Input[pulumi.InputType['SecuritySettingsArgs']]] = None,
service_bindings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
session_affinity: Optional[pulumi.Input['RegionBackendServiceSessionAffinity']] = None,
subsetting: Optional[pulumi.Input[pulumi.InputType['SubsettingArgs']]] = None,
timeout_sec: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] affinity_cookie_ttl_sec: Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendArgs']]]] backends: The list of backends that serve this BackendService.
:param pulumi.Input[pulumi.InputType['BackendServiceCdnPolicyArgs']] cdn_policy: Cloud CDN configuration for this BackendService. Only available for specified load balancer types.
:param pulumi.Input['RegionBackendServiceCompressionMode'] compression_mode: Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
:param pulumi.Input[pulumi.InputType['BackendServiceConnectionTrackingPolicyArgs']] connection_tracking_policy: Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing.
:param pulumi.Input[pulumi.InputType['ConsistentHashLoadBalancerSettingsArgs']] consistent_hash: Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_request_headers: Headers that the load balancer adds to proxied requests. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_response_headers: Headers that the load balancer adds to proxied responses. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
:param pulumi.Input[bool] enable_cdn: If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.
:param pulumi.Input[pulumi.InputType['BackendServiceFailoverPolicyArgs']] failover_policy: Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).
:param pulumi.Input[Sequence[pulumi.Input[str]]] health_checks: The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.
:param pulumi.Input[pulumi.InputType['BackendServiceIAPArgs']] iap: The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing.
:param pulumi.Input['RegionBackendServiceLoadBalancingScheme'] load_balancing_scheme: Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer.
:param pulumi.Input['RegionBackendServiceLocalityLbPolicy'] locality_lb_policy: The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
:param pulumi.Input[pulumi.InputType['BackendServiceLogConfigArgs']] log_config: This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver.
:param pulumi.Input[pulumi.InputType['DurationArgs']] max_stream_duration: Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
:param pulumi.Input[str] network: The URL of the network to which this backend service belongs. This field can only be specified when the load balancing scheme is set to INTERNAL.
:param pulumi.Input[pulumi.InputType['OutlierDetectionArgs']] outlier_detection: Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
:param pulumi.Input[str] port_name: A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.
        :param pulumi.Input['RegionBackendServiceProtocol'] protocol: The protocol this BackendService uses to communicate with backends. Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC, depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancers or for Traffic Director for more information. Must be set to GRPC when the backend service is referenced by a URL map that is bound to target gRPC proxy.
:param pulumi.Input[pulumi.InputType['SecuritySettingsArgs']] security_settings: This field specifies the security policy that applies to this backend service. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
:param pulumi.Input[Sequence[pulumi.Input[str]]] service_bindings: URLs of networkservices.ServiceBinding resources. Can only be set if load balancing scheme is INTERNAL_SELF_MANAGED. If set, lists of backends and health checks must be both empty.
:param pulumi.Input['RegionBackendServiceSessionAffinity'] session_affinity: Type of session affinity to use. The default is NONE. Only NONE and HEADER_FIELD are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. For more details, see: [Session Affinity](https://cloud.google.com/load-balancing/docs/backend-service#session_affinity).
:param pulumi.Input[int] timeout_sec: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Instead, use maxStreamDuration.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegionBackendServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.
:param str resource_name: The name of the resource.
:param RegionBackendServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegionBackendServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
affinity_cookie_ttl_sec: Optional[pulumi.Input[int]] = None,
backends: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendArgs']]]]] = None,
cdn_policy: Optional[pulumi.Input[pulumi.InputType['BackendServiceCdnPolicyArgs']]] = None,
circuit_breakers: Optional[pulumi.Input[pulumi.InputType['CircuitBreakersArgs']]] = None,
compression_mode: Optional[pulumi.Input['RegionBackendServiceCompressionMode']] = None,
connection_draining: Optional[pulumi.Input[pulumi.InputType['ConnectionDrainingArgs']]] = None,
connection_tracking_policy: Optional[pulumi.Input[pulumi.InputType['BackendServiceConnectionTrackingPolicyArgs']]] = None,
consistent_hash: Optional[pulumi.Input[pulumi.InputType['ConsistentHashLoadBalancerSettingsArgs']]] = None,
custom_request_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
custom_response_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
enable_cdn: Optional[pulumi.Input[bool]] = None,
failover_policy: Optional[pulumi.Input[pulumi.InputType['BackendServiceFailoverPolicyArgs']]] = None,
health_checks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
iap: Optional[pulumi.Input[pulumi.InputType['BackendServiceIAPArgs']]] = None,
load_balancing_scheme: Optional[pulumi.Input['RegionBackendServiceLoadBalancingScheme']] = None,
locality_lb_policy: Optional[pulumi.Input['RegionBackendServiceLocalityLbPolicy']] = None,
log_config: Optional[pulumi.Input[pulumi.InputType['BackendServiceLogConfigArgs']]] = None,
max_stream_duration: Optional[pulumi.Input[pulumi.InputType['DurationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
outlier_detection: Optional[pulumi.Input[pulumi.InputType['OutlierDetectionArgs']]] = None,
port_name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input['RegionBackendServiceProtocol']] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
security_settings: Optional[pulumi.Input[pulumi.InputType['SecuritySettingsArgs']]] = None,
service_bindings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
session_affinity: Optional[pulumi.Input['RegionBackendServiceSessionAffinity']] = None,
subsetting: Optional[pulumi.Input[pulumi.InputType['SubsettingArgs']]] = None,
timeout_sec: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegionBackendServiceArgs.__new__(RegionBackendServiceArgs)
__props__.__dict__["affinity_cookie_ttl_sec"] = affinity_cookie_ttl_sec
__props__.__dict__["backends"] = backends
__props__.__dict__["cdn_policy"] = cdn_policy
__props__.__dict__["circuit_breakers"] = circuit_breakers
__props__.__dict__["compression_mode"] = compression_mode
__props__.__dict__["connection_draining"] = connection_draining
__props__.__dict__["connection_tracking_policy"] = connection_tracking_policy
__props__.__dict__["consistent_hash"] = consistent_hash
__props__.__dict__["custom_request_headers"] = custom_request_headers
__props__.__dict__["custom_response_headers"] = custom_response_headers
__props__.__dict__["description"] = description
__props__.__dict__["enable_cdn"] = enable_cdn
__props__.__dict__["failover_policy"] = failover_policy
__props__.__dict__["health_checks"] = health_checks
__props__.__dict__["iap"] = iap
__props__.__dict__["load_balancing_scheme"] = load_balancing_scheme
__props__.__dict__["locality_lb_policy"] = locality_lb_policy
__props__.__dict__["log_config"] = log_config
__props__.__dict__["max_stream_duration"] = max_stream_duration
__props__.__dict__["name"] = name
__props__.__dict__["network"] = network
__props__.__dict__["outlier_detection"] = outlier_detection
__props__.__dict__["port_name"] = port_name
__props__.__dict__["project"] = project
__props__.__dict__["protocol"] = protocol
if region is None and not opts.urn:
raise TypeError("Missing required property 'region'")
__props__.__dict__["region"] = region
__props__.__dict__["request_id"] = request_id
__props__.__dict__["security_settings"] = security_settings
__props__.__dict__["service_bindings"] = service_bindings
__props__.__dict__["session_affinity"] = session_affinity
__props__.__dict__["subsetting"] = subsetting
__props__.__dict__["timeout_sec"] = timeout_sec
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["edge_security_policy"] = None
__props__.__dict__["fingerprint"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["security_policy"] = None
__props__.__dict__["self_link"] = None
super(RegionBackendService, __self__).__init__(
'google-native:compute/beta:RegionBackendService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RegionBackendService':
"""
Get an existing RegionBackendService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RegionBackendServiceArgs.__new__(RegionBackendServiceArgs)
__props__.__dict__["affinity_cookie_ttl_sec"] = None
__props__.__dict__["backends"] = None
__props__.__dict__["cdn_policy"] = None
__props__.__dict__["circuit_breakers"] = None
__props__.__dict__["compression_mode"] = None
__props__.__dict__["connection_draining"] = None
__props__.__dict__["connection_tracking_policy"] = None
__props__.__dict__["consistent_hash"] = None
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["custom_request_headers"] = None
__props__.__dict__["custom_response_headers"] = None
__props__.__dict__["description"] = None
__props__.__dict__["edge_security_policy"] = None
__props__.__dict__["enable_cdn"] = None
__props__.__dict__["failover_policy"] = None
__props__.__dict__["fingerprint"] = None
__props__.__dict__["health_checks"] = None
__props__.__dict__["iap"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["load_balancing_scheme"] = None
__props__.__dict__["locality_lb_policy"] = None
__props__.__dict__["log_config"] = None
__props__.__dict__["max_stream_duration"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network"] = None
__props__.__dict__["outlier_detection"] = None
__props__.__dict__["port_name"] = None
__props__.__dict__["protocol"] = None
__props__.__dict__["region"] = None
__props__.__dict__["security_policy"] = None
__props__.__dict__["security_settings"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["service_bindings"] = None
__props__.__dict__["session_affinity"] = None
__props__.__dict__["subsetting"] = None
__props__.__dict__["timeout_sec"] = None
return RegionBackendService(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="affinityCookieTtlSec")
def affinity_cookie_ttl_sec(self) -> pulumi.Output[int]:
"""
Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
"""
return pulumi.get(self, "affinity_cookie_ttl_sec")
@property
@pulumi.getter
def backends(self) -> pulumi.Output[Sequence['outputs.BackendResponse']]:
"""
The list of backends that serve this BackendService.
"""
return pulumi.get(self, "backends")
@property
@pulumi.getter(name="cdnPolicy")
def cdn_policy(self) -> pulumi.Output['outputs.BackendServiceCdnPolicyResponse']:
"""
Cloud CDN configuration for this BackendService. Only available for specified load balancer types.
"""
return pulumi.get(self, "cdn_policy")
@property
@pulumi.getter(name="circuitBreakers")
def circuit_breakers(self) -> pulumi.Output['outputs.CircuitBreakersResponse']:
return pulumi.get(self, "circuit_breakers")
@property
@pulumi.getter(name="compressionMode")
def compression_mode(self) -> pulumi.Output[str]:
"""
Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.
"""
return pulumi.get(self, "compression_mode")
@property
@pulumi.getter(name="connectionDraining")
def connection_draining(self) -> pulumi.Output['outputs.ConnectionDrainingResponse']:
return pulumi.get(self, "connection_draining")
@property
@pulumi.getter(name="connectionTrackingPolicy")
def connection_tracking_policy(self) -> pulumi.Output['outputs.BackendServiceConnectionTrackingPolicyResponse']:
"""
Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing.
"""
return pulumi.get(self, "connection_tracking_policy")
@property
@pulumi.getter(name="consistentHash")
def consistent_hash(self) -> pulumi.Output['outputs.ConsistentHashLoadBalancerSettingsResponse']:
"""
Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
"""
return pulumi.get(self, "consistent_hash")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter(name="customRequestHeaders")
def custom_request_headers(self) -> pulumi.Output[Sequence[str]]:
"""
Headers that the load balancer adds to proxied requests. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
"""
return pulumi.get(self, "custom_request_headers")
@property
@pulumi.getter(name="customResponseHeaders")
def custom_response_headers(self) -> pulumi.Output[Sequence[str]]:
"""
Headers that the load balancer adds to proxied responses. See [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).
"""
return pulumi.get(self, "custom_response_headers")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="edgeSecurityPolicy")
def edge_security_policy(self) -> pulumi.Output[str]:
"""
The resource URL for the edge security policy associated with this backend service.
"""
return pulumi.get(self, "edge_security_policy")
@property
@pulumi.getter(name="enableCDN")
def enable_cdn(self) -> pulumi.Output[bool]:
"""
If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.
"""
return pulumi.get(self, "enable_cdn")
@property
@pulumi.getter(name="failoverPolicy")
def failover_policy(self) -> pulumi.Output['outputs.BackendServiceFailoverPolicyResponse']:
"""
Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).
"""
return pulumi.get(self, "failover_policy")
@property
@pulumi.getter
def fingerprint(self) -> pulumi.Output[str]:
"""
Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.
"""
return pulumi.get(self, "fingerprint")
@property
@pulumi.getter(name="healthChecks")
def health_checks(self) -> pulumi.Output[Sequence[str]]:
"""
The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.
"""
return pulumi.get(self, "health_checks")
@property
@pulumi.getter
def iap(self) -> pulumi.Output['outputs.BackendServiceIAPResponse']:
"""
The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing.
"""
return pulumi.get(self, "iap")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Type of resource. Always compute#backendService for backend services.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="loadBalancingScheme")
def load_balancing_scheme(self) -> pulumi.Output[str]:
"""
Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer.
"""
return pulumi.get(self, "load_balancing_scheme")
@property
@pulumi.getter(name="localityLbPolicy")
def locality_lb_policy(self) -> pulumi.Output[str]:
"""
The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
"""
return pulumi.get(self, "locality_lb_policy")
@property
@pulumi.getter(name="logConfig")
def log_config(self) -> pulumi.Output['outputs.BackendServiceLogConfigResponse']:
"""
This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver.
"""
return pulumi.get(self, "log_config")
@property
@pulumi.getter(name="maxStreamDuration")
def max_stream_duration(self) -> pulumi.Output['outputs.DurationResponse']:
"""
Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED.
"""
return pulumi.get(self, "max_stream_duration")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def network(self) -> pulumi.Output[str]:
"""
The URL of the network to which this backend service belongs. This field can only be specified when the load balancing scheme is set to INTERNAL.
"""
return pulumi.get(self, "network")
@property
@pulumi.getter(name="outlierDetection")
def outlier_detection(self) -> pulumi.Output['outputs.OutlierDetectionResponse']:
"""
Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
"""
return pulumi.get(self, "outlier_detection")
@property
@pulumi.getter(name="portName")
def port_name(self) -> pulumi.Output[str]:
"""
A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.
"""
return pulumi.get(self, "port_name")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
        The protocol this BackendService uses to communicate with backends. Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC, depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancers or for Traffic Director for more information. Must be set to GRPC when the backend service is referenced by a URL map that is bound to target gRPC proxy.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
URL of the region where the regional backend service resides. This field is not applicable to global backend services. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="securityPolicy")
def security_policy(self) -> pulumi.Output[str]:
"""
The resource URL for the security policy associated with this backend service.
"""
return pulumi.get(self, "security_policy")
@property
@pulumi.getter(name="securitySettings")
def security_settings(self) -> pulumi.Output['outputs.SecuritySettingsResponse']:
"""
This field specifies the security policy that applies to this backend service. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.
"""
return pulumi.get(self, "security_settings")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="serviceBindings")
def service_bindings(self) -> pulumi.Output[Sequence[str]]:
"""
URLs of networkservices.ServiceBinding resources. Can only be set if load balancing scheme is INTERNAL_SELF_MANAGED. If set, lists of backends and health checks must be both empty.
"""
return pulumi.get(self, "service_bindings")
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> pulumi.Output[str]:
"""
Type of session affinity to use. The default is NONE. Only NONE and HEADER_FIELD are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. For more details, see: [Session Affinity](https://cloud.google.com/load-balancing/docs/backend-service#session_affinity).
"""
return pulumi.get(self, "session_affinity")
@property
@pulumi.getter
def subsetting(self) -> pulumi.Output['outputs.SubsettingResponse']:
return pulumi.get(self, "subsetting")
@property
@pulumi.getter(name="timeoutSec")
def timeout_sec(self) -> pulumi.Output[int]:
"""
Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Instead, use maxStreamDuration.
"""
return pulumi.get(self, "timeout_sec")
|
py | b40a9677a6091ebb6e6aa0bee4a6419aa765338f | from typing import List, Dict, Union
from typing_extensions import Literal
from .models import Card, Event, Gacha, PartialEvent, PartialGacha
Server = Literal['jp', 'en', 'tw', 'cn', 'kr']
servers = ['jp', 'en', 'tw', 'cn', 'kr']
server_index_map = {server: index for index, server in enumerate(servers)}
def get_card_asset_urls_from_metadata(metadata: Card):
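    # Bestdori groups card thumbnail assets into buckets of 50 ids; group_id is
    # the zero-padded bucket index (e.g. a card id of 1234 maps to '00024').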
suffix = str(metadata.id // 50)
group_id = '0' * (5 - len(suffix)) + suffix
resource_set_name = metadata.resource_set_name
urls = {
'icon_normal': f'https://bestdori.com/assets/jp/thumb/chara/card{group_id}_rip/{resource_set_name}_normal.png',
'card_normal': f'https://bestdori.com/assets/jp/characters/resourceset/{resource_set_name}_rip/card_normal.png',
}
if metadata.rarity > 2:
urls = {
**urls,
'icon_after_training': f'https://bestdori.com/assets/jp/thumb/chara/card{group_id}_rip/{resource_set_name}_after_training.png',
'card_after_training': f'https://bestdori.com/assets/jp/characters/resourceset/{resource_set_name}_rip/card_after_training.png',
}
return urls
def get_event_asset_urls_from_metadata(metadata: Event):
banner_asset_bundle_name, asset_bundle_name = metadata.banner_asset_bundle_name, metadata.asset_bundle_name
banner_urls = {server: f'https://bestdori.com/assets/{server}/homebanner_rip/{banner_asset_bundle_name}.png' for server in servers}
banner_urls2 = {server: f'https://bestdori.com/assets/{server}/event/{asset_bundle_name}/images_rip/banner.png' for server in servers}
trim_eventtop_urls = {server: f'https://bestdori.com/assets/{server}/event/{asset_bundle_name}/topscreen_rip/trim_eventtop.png' for server in servers}
    bg_event_top_urls = {server: f'https://bestdori.com/assets/{server}/event/{asset_bundle_name}/topscreen_rip/bg_eventtop.png' for server in servers}
    # The URL maps above were built but never returned; returning them under
    # assumed key names so callers can actually use the result.
    return {'banner': banner_urls, 'banner2': banner_urls2,
            'trim_eventtop': trim_eventtop_urls, 'bg_eventtop': bg_event_top_urls}
def is_event_duration_intersected_with_gacha_duration(
event: PartialEvent,
gacha: PartialGacha,
server: Server = 'jp',
) -> bool:
server_index = server_index_map[server]
if gacha.published_at[server_index] and gacha.closed_at[server_index] and event.start_at[server_index] and event.end_at[server_index]:
return not (gacha.published_at[server_index] >= event.end_at[server_index] or gacha.closed_at[server_index] <= event.start_at[server_index])
return False
def get_gacha_during_event(
event: PartialEvent,
gachas: Union[List[PartialGacha], Dict[int, PartialGacha]],
server: Server = 'jp',
):
if isinstance(gachas, list):
return [gacha for gacha in gachas if is_event_duration_intersected_with_gacha_duration(event, gacha, server)]
return {id: gacha for id, gacha in gachas.items() if is_event_duration_intersected_with_gacha_duration(event, gacha, server)}
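# A hedged usage sketch: assumes PartialEvent/PartialGacha objects have been
# loaded elsewhere (e.g. from Bestdori API data); the variable names are hypothetical.
#
# overlapping = get_gacha_during_event(event, all_gachas, server='en')
# # `overlapping` mirrors the type of `all_gachas` (list or dict) and keeps only
# # the gacha whose published/closed window intersects the event's duration.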
|
py | b40a971ce8b7bbd12b09a61a19816ed8f4ac24ce | # coding:utf-8
from pagarme.api import default_api
from pagarme.common import make_url
from .resource import Resource
class Plan(Resource):
"""`Plan`:class: wrapping the REST /plans endpoint
"""
def create(self):
response = self.api.post('/plans', self.to_dict())
self.assign(response)
return self.success()
def update(self, attributes=None):
attributes = attributes or self.to_dict()
response = self.api.put(make_url('/plans', str(self.id)), data=attributes)
self.assign(response)
return self.success()
def delete(self):
response = self.api.delete(make_url('/plans', str(self.id)))
self.assign(response)
return self.success()
@classmethod
def find(cls, plan_id):
api = default_api()
url = make_url('/plans', str(plan_id))
return cls(api.get(url))
@classmethod
def all(cls, count=10, page=1):
api = default_api()
params = {'count': count, 'page': page}
response = api.get('/plans', params=params)
return [cls(item) for item in response]
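# A hedged usage sketch: assumes the pagarme API key has been configured
# elsewhere; the payload fields below are illustrative, not a guaranteed
# /plans schema.
#
# plan = Plan({'name': 'Gold', 'amount': 4990, 'days': 30})
# if plan.create():
#     fetched = Plan.find(plan.id)
#     first_page = Plan.all(count=10, page=1)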
|
py | b40a971f70e75eba44d4e5baeca3a8193d99f231 | """Base class for sparse matrices with a .data attribute
subclasses must provide a _with_data() method that
creates a new matrix with the same sparsity pattern
as self but with a different data array
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .base import spmatrix, _ufuncs_with_fixed_point_at_zero
from .sputils import isscalarlike, validateaxis, matrix
__all__ = []
# TODO implement all relevant operations
# use .data.__methods__() instead of /=, *=, etc.
class _data_matrix(spmatrix):
def __init__(self):
spmatrix.__init__(self)
def _get_dtype(self):
return self.data.dtype
def _set_dtype(self, newtype):
self.data.dtype = newtype
dtype = property(fget=_get_dtype, fset=_set_dtype)
def _deduped_data(self):
if hasattr(self, 'sum_duplicates'):
self.sum_duplicates()
return self.data
def __abs__(self):
return self._with_data(abs(self._deduped_data()))
def _real(self):
return self._with_data(self.data.real)
def _imag(self):
return self._with_data(self.data.imag)
def __neg__(self):
if self.dtype.kind == 'b':
raise NotImplementedError('negating a sparse boolean '
'matrix is not supported')
return self._with_data(-self.data)
def __imul__(self, other): # self *= other
if isscalarlike(other):
self.data *= other
return self
else:
return NotImplemented
def __itruediv__(self, other): # self /= other
if isscalarlike(other):
recip = 1.0 / other
self.data *= recip
return self
else:
return NotImplemented
def astype(self, dtype, casting='unsafe', copy=True):
dtype = np.dtype(dtype)
if self.dtype != dtype:
return self._with_data(
self._deduped_data().astype(dtype, casting=casting, copy=copy),
copy=copy)
elif copy:
return self.copy()
else:
return self
astype.__doc__ = spmatrix.astype.__doc__
def conj(self, copy=True):
if np.issubdtype(self.dtype, np.complexfloating):
return self._with_data(self.data.conj(), copy=copy)
elif copy:
return self.copy()
else:
return self
conj.__doc__ = spmatrix.conj.__doc__
def copy(self):
return self._with_data(self.data.copy(), copy=True)
copy.__doc__ = spmatrix.copy.__doc__
def count_nonzero(self):
return np.count_nonzero(self._deduped_data())
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def power(self, n, dtype=None):
"""
This function performs element-wise power.
Parameters
----------
n : n is a scalar
dtype : If dtype is not specified, the current dtype will be preserved.
"""
if not isscalarlike(n):
raise NotImplementedError("input is not scalar")
data = self._deduped_data()
if dtype is not None:
data = data.astype(dtype)
return self._with_data(data ** n)
###########################
# Multiplication handlers #
###########################
def _mul_scalar(self, other):
return self._with_data(self.data * other)
# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix.
for npfunc in _ufuncs_with_fixed_point_at_zero:
name = npfunc.__name__
def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = ("Element-wise %s.\n\n"
"See numpy.%s for more information." % (name, name))
method.__name__ = name
return method
setattr(_data_matrix, name, _create_method(npfunc))
def _find_missing_index(ind, n):
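    # Assuming ``ind`` is a sorted array of unique indices drawn from range(n),
    # return the first value in range(n) that is missing from ``ind``, or -1 if
    # ``ind`` already covers all of range(n).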
for k, a in enumerate(ind):
if k != a:
return k
k += 1
if k < n:
return k
else:
return -1
class _minmax_mixin(object):
"""Mixin for min and max methods.
These are not implemented for dia_matrix, hence the separate class.
"""
def _min_or_max_axis(self, axis, min_or_max):
N = self.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = self.shape[1 - axis]
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
major_index, value = mat._minor_reduce(min_or_max)
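        # Lines with fewer than N stored entries contain implicit zeros, so their
        # reduced value must also be compared against zero.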
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from . import coo_matrix
if axis == 0:
return coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=self.dtype, shape=(1, M))
else:
return coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=self.dtype, shape=(M, 1))
def _min_or_max(self, axis, out, min_or_max):
if out is not None:
raise ValueError(("Sparse matrices do not support "
"an 'out' parameter."))
validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("zero-size array to reduction operation")
zero = self.dtype.type(0)
if self.nnz == 0:
return zero
m = min_or_max.reduce(self._deduped_data().ravel())
if self.nnz != np.product(self.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return self._min_or_max_axis(axis, min_or_max)
else:
raise ValueError("axis out of range")
def _arg_min_or_max_axis(self, axis, op, compare):
if self.shape[axis] == 0:
raise ValueError("Can't apply the operation along a zero-sized "
"dimension.")
if axis < 0:
axis += 2
zero = self.dtype.type(0)
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
ret_size, line_size = mat._swap(mat.shape)
ret = np.zeros(ret_size, dtype=int)
nz_lines, = np.nonzero(np.diff(mat.indptr))
for i in nz_lines:
p, q = mat.indptr[i:i + 2]
data = mat.data[p:q]
indices = mat.indices[p:q]
am = op(data)
m = data[am]
if compare(m, zero) or q - p == line_size:
ret[i] = indices[am]
else:
zero_ind = _find_missing_index(indices, line_size)
if m == zero:
ret[i] = min(am, zero_ind)
else:
ret[i] = zero_ind
if axis == 1:
ret = ret.reshape(-1, 1)
return matrix(ret)
def _arg_min_or_max(self, axis, out, op, compare):
if out is not None:
raise ValueError("Sparse matrices do not support "
"an 'out' parameter.")
validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("Can't apply the operation to "
"an empty matrix.")
if self.nnz == 0:
return 0
else:
zero = self.dtype.type(0)
mat = self.tocoo()
mat.sum_duplicates()
am = op(mat.data)
m = mat.data[am]
if compare(m, zero):
return mat.row[am] * mat.shape[1] + mat.col[am]
else:
size = np.product(mat.shape)
if size == mat.nnz:
return am
else:
ind = mat.row * mat.shape[1] + mat.col
zero_ind = _find_missing_index(ind, size)
if m == zero:
return min(zero_ind, am)
else:
return zero_ind
return self._arg_min_or_max_axis(axis, op, compare)
def max(self, axis=None, out=None):
"""
Return the maximum of the matrix or maximum along an axis.
This takes all elements into account, not just the non-zero ones.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
            Axis along which the maximum is computed. The default is to
compute the maximum over all the matrix elements, returning
a scalar (i.e. `axis` = `None`).
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value, as this argument is not used.
Returns
-------
amax : coo_matrix or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is a sparse.coo_matrix of dimension
``a.ndim - 1``.
See Also
--------
min : The minimum value of a sparse matrix along a given axis.
np.matrix.max : NumPy's implementation of 'max' for matrices
"""
return self._min_or_max(axis, out, np.maximum)
def min(self, axis=None, out=None):
"""
        Return the minimum of the matrix or minimum along an axis.
This takes all elements into account, not just the non-zero ones.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
            Axis along which the minimum is computed. The default is to
compute the minimum over all the matrix elements, returning
a scalar (i.e. `axis` = `None`).
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
amin : coo_matrix or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is a sparse.coo_matrix of dimension
``a.ndim - 1``.
See Also
--------
max : The maximum value of a sparse matrix along a given axis.
np.matrix.min : NumPy's implementation of 'min' for matrices
"""
return self._min_or_max(axis, out, np.minimum)
def argmax(self, axis=None, out=None):
"""Return indices of maximum elements along an axis.
Implicit zero elements are also taken into account. If there are
several maximum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmax is computed. If None (default), index
            of the maximum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : np.matrix or int
Indices of maximum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmax, np.greater)
def argmin(self, axis=None, out=None):
"""Return indices of minimum elements along an axis.
Implicit zero elements are also taken into account. If there are
several minimum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmin is computed. If None (default), index
            of the minimum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : np.matrix or int
Indices of minimum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmin, np.less)
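# A hedged usage sketch of the min/max mixin as exposed through the public
# sparse classes (expected values were worked out by hand, not taken from tests):
#
# from scipy.sparse import csr_matrix
# import numpy as np
#
# a = csr_matrix(np.array([[1, 2, 0], [3, 0, 4]]))
# a.max()           # 4 -- reduction over all entries, implicit zeros included
# a.min(axis=0)     # sparse 1x3 matrix of column minima (dense: [[1, 0, 0]])
# a.argmax(axis=1)  # np.matrix([[1], [2]]) -- index of each row's maximum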
|
py | b40a97379b4acbf46aed5210380ec6cc29ec5c8d | # STANDARD LIB
import hashlib
# 3RD PARTY
from django.contrib.auth import logout as django_logout
from django.http import HttpResponseRedirect
from django.utils.encoding import iri_to_uri
from google.appengine.api import users
def login_redirect(request):
return HttpResponseRedirect(users.create_login_url(dest_url=request.GET.get('next')))
def switch_accounts(request):
""" A view which allows a user to change which of their Google accounts they're logged in with.
The URL for the user to be sent to afterwards should be provided in request.GET['next'].
See https://p.ota.to/blog/2014/2/google-multiple-sign-in-on-app-engine/
    For the account switching, the user first needs to go to Google's login page. If they
    come back with the same user, we send them to the logout URL and *then* the login page.
Scenario:
1. User clicks a 'switch accounts' link which takes them to this view.
2. We redirect them to the Google login screen where - if they are logged into multiple
accounts - they get the opportunity to switch account.
3. Two things may happen:
a. They aren't logged into multiple accounts, so Google redirects them straight back to
us. As we want them to switch account, we send them back to Google's logout URL with
the `continue` url set to the Google login page. => They log into another account.
i. They then return to here, where we clear their session and send them on their way.
b. They actually switched account, and so they come back with a different account and we
redirect them to the original destination set when first visiting this view.
See the steps in the code, referring to the steps of the scenario.
"""
destination = request.GET.get('next', '/')
current_google_user = users.get_current_user()
# Just making sure we don't save readable info in the session as we can't be sure this session
# will be terminated after logout. This is possibly paranoia.
user_hash = hashlib.sha1(current_google_user.user_id()).hexdigest()
previous_user_hash = request.session.get('previous_user')
previous_user_already_redirected = request.session.get('previous_user_already_redirected', False)
if previous_user_hash:
if user_hash == previous_user_hash and not previous_user_already_redirected:
# Step 3.a.
django_logout(request) # Make sure old Django user session gets flushed.
request.session['previous_user'] = user_hash # but add the previous_user hash back in
request.session['previous_user_already_redirected'] = True
# We want to create a URL to the logout URL which then goes to the login URL which then
# goes back to *this* view, which then goes to the final destination
login_url = iri_to_uri(users.create_login_url(request.get_full_path()))
logout_url = users.create_logout_url(login_url)
return HttpResponseRedirect(logout_url)
else:
            # Step 3.b, or step 3.a.i.
del request.session['previous_user']
if 'previous_user_already_redirected' in request.session:
del request.session['previous_user_already_redirected']
return HttpResponseRedirect(destination)
else:
# Step 2:
switch_account_url = iri_to_uri(request.get_full_path())
redirect_url = users.create_login_url(switch_account_url)
django_logout(request) # Make sure old Django user session gets flushed.
request.session['previous_user'] = user_hash
return HttpResponseRedirect(redirect_url)
|
py | b40a97555d4c0066a69c581bd172a39d1d444c2e | def f(x):
class c:
nonlocal x
x += 1
def get(self):
return x
return c()
c = f(0)
___assertEqual(c.get(), 1)
___assertNotIn("x", c.__class__.__dict__)
|
py | b40a97dbab9a3b84d1495e8e4bc4e06b27db06e9 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from petstore_api.model import animal
except ImportError:
animal = sys.modules[
'petstore_api.model.animal']
try:
from petstore_api.model import cat_all_of
except ImportError:
cat_all_of = sys.modules[
'petstore_api.model.cat_all_of']
class Cat(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'class_name': (str,), # noqa: E501
'declawed': (bool,), # noqa: E501
'color': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_name': val}
attribute_map = {
'class_name': 'className', # noqa: E501
'declawed': 'declawed', # noqa: E501
'color': 'color', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, class_name, *args, **kwargs): # noqa: E501
"""cat.Cat - a model defined in OpenAPI
Args:
class_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
declawed (bool): [optional] # noqa: E501
color (str): [optional] if omitted the server will use the default value of 'red' # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_name': class_name,
}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
'anyOf': [
],
'allOf': [
animal.Animal,
cat_all_of.CatAllOf,
],
'oneOf': [
],
}
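    # --- Added usage note (illustrative only, not generator output) ---
    # A Cat is normally constructed with the required discriminator value plus
    # the allOf properties contributed by Animal and CatAllOf, using the
    # pythonic names from attribute_map above, e.g. (hypothetical values):
    #     cat = Cat(class_name="Cat", color="black", declawed=False)
    # The extra keyword arguments are checked against the composed schemas via
    # validate_get_composed_info in __init__.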
|
py | b40a98ee9dff1eb58b1edcdb6d7d41f1b8bbbb19 | a_greet = ["hello", "hi", "hey"]
a_greet_answer = ["hello sir", "hi there", "hi", "hey", "yes sir", "i am here sir"]
a_greet_do = ["what can i do for you", "how can i help", "tell me","thanx to greet"]
a_goodgreet = ["good morning", "good afternoon", "good evening"]
good_morn = ["hope you made a great sleep","how's the josh sir. just kidding", "i love waking up to a command from you", "hope your day will be amazing,just like you",
"You made my morning feel good", "have a great day", "My morning can’t be good if you’re not here"]
good_after = ["hope you had a great morning"]
a_exit = ["shutdown", "goodbye", "good bye", "go offline", "offline", ""] |
py | b40a992b131450d149d7c0c8a38d43154acb9450 | # -*- coding: utf-8 -*-
"""
pyrseas.dbobject.eventtrig
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module defines two classes: EventTrigger derived from
DbObject, and EventTriggerDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbObject
from pyrseas.dbobject import quote_id, commentable
EXEC_PROC = 'EXECUTE PROCEDURE '
class EventTrigger(DbObject):
"""An event trigger"""
keylist = ['name']
objtype = "EVENT TRIGGER"
@commentable
def create(self):
"""Return SQL statements to CREATE the event trigger
:return: SQL statements
"""
filter = ''
if hasattr(self, 'tags'):
filter = "\n WHEN tag IN (%s)" % ", ".join(
["'%s'" % tag for tag in self.tags])
return ["CREATE %s %s\n ON %s%s\n EXECUTE PROCEDURE %s" % (
self.objtype, quote_id(self.name), self.event, filter,
self.procedure)]
class EventTriggerDict(DbObjectDict):
"The collection of event triggers in a database"
cls = EventTrigger
query = \
"""SELECT evtname AS name, evtevent AS event, rolname AS owner,
evtenabled AS enabled, evtfoid::regprocedure AS procedure,
evttags AS tags,
obj_description(t.oid, 'pg_event_trigger') AS description
FROM pg_event_trigger t
JOIN pg_roles ON (evtowner = pg_roles.oid)
ORDER BY 1"""
enable_modes = {'O': True, 'D': False, 'R': 'replica',
'A': 'always'}
def _from_catalog(self):
"""Initialize the dictionary of triggers by querying the catalogs"""
if self.dbconn.version < 90300:
return
for trig in self.fetch():
trig.enabled = self.enable_modes[trig.enabled]
self[trig.key()] = trig
    def from_map(self, intriggers, newdb):
        """Initialize the dictionary of triggers by converting the input map
:param intriggers: YAML map defining the event triggers
:param newdb: dictionary of input database
"""
for key in intriggers:
if not key.startswith('event trigger '):
raise KeyError("Unrecognized object type: %s" % key)
trg = key[14:]
intrig = intriggers[key]
if not intrig:
raise ValueError("Event trigger '%s' has no specification" %
trg)
self[trg] = trig = EventTrigger(name=trg)
for attr, val in list(intrig.items()):
setattr(trig, attr, val)
if 'oldname' in intrig:
trig.oldname = intrig['oldname']
if 'description' in intrig:
trig.description = intrig['description']
def diff_map(self, intriggers):
"""Generate SQL to transform existing event triggers
:param intriggers: a YAML map defining the new event triggers
:return: list of SQL statements
Compares the existing event trigger definitions, as fetched
from the catalogs, to the input map and generates SQL
statements to transform the event triggers accordingly.
"""
stmts = []
# check input triggers
for trg in intriggers:
intrig = intriggers[trg]
# does it exist in the database?
if trg not in self:
if not hasattr(intrig, 'oldname'):
# create new trigger
stmts.append(intrig.create())
else:
stmts.append(self[trg].rename(intrig))
else:
# check trigger objects
stmts.append(self[trg].diff_map(intrig))
# check existing triggers
for trg in self:
trig = self[trg]
# if missing, drop them
if trg not in intriggers:
stmts.append(trig.drop())
return stmts
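# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the SQL that EventTrigger.create() emits for a minimal trigger. The
# trigger name, event, tag and procedure below are made-up example values, and
# this assumes DbObject.__init__ accepts arbitrary keyword attributes as in pyrseas.
if __name__ == '__main__':
    example_trigger = EventTrigger(name='audit_ddl', event='ddl_command_start',
                                   procedure='audit_ddl_func()',
                                   tags=['CREATE TABLE'])
    # Expected output: a single CREATE EVENT TRIGGER statement with a WHEN tag filter.
    print(example_trigger.create())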
|
py | b40a9a1af1bf629ba98e88becc955390ffbf4569 | from html import unescape
import re
from discord import Webhook, RequestsWebhookAdapter, Embed
import discord
import random
from datetime import datetime
COLORS = [
0x7F0000,
0x535900,
0x40D9FF,
0x8C7399,
0xD97B6C,
0xF2FF40,
0x8FB6BF,
0x502D59,
0x66504D,
0x89B359,
0x00AAFF,
0xD600E6,
0x401100,
0x44FF00,
0x1A2B33,
0xFF00AA,
0xFF8C40,
0x17330D,
0x0066BF,
0x33001B,
0xB39886,
0xBFFFD0,
0x163A59,
0x8C235B,
0x8C5E00,
0x00733D,
0x000C59,
0xFFBFD9,
0x4C3300,
0x36D98D,
0x3D3DF2,
0x590018,
0xF2C200,
0x264D40,
0xC8BFFF,
0xF23D6D,
0xD9C36C,
0x2DB3AA,
0xB380FF,
0xFF0022,
0x333226,
0x005C73,
0x7C29A6,
]
WH_REGEX = r"discord(app)?\.com\/api\/webhooks\/(?P<id>\d+)\/(?P<token>.+)"
def worth_posting_location(location, coordinates, retweeted, include_retweet):
location = [location[i : i + 4] for i in range(0, len(location), 4)]
for box in location:
for coordinate in coordinates:
if box[0] < coordinate[0] < box[2] and box[1] < coordinate[1] < box[3]:
if not include_retweet and retweeted:
return False
return True
return False
def worth_posting_track(track, hashtags, text, retweeted, include_retweet):
for t in track:
if t.startswith("#"):
if t[1:] in map(lambda x: x["text"], hashtags):
if not include_retweet and retweeted:
return False
return True
elif t in text:
if not include_retweet and retweeted:
return False
return True
return False
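# Illustrative check (added comment, not part of the original source): a tweet
# whose hashtags include "python" matches a track list containing "#python":
#   worth_posting_track(["#python"], [{"text": "python"}], "any text", False, True) -> True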
def worth_posting_follow(
tweeter_id,
twitter_ids,
in_reply_to_twitter_id,
retweeted,
include_reply_to_user,
include_user_reply,
include_retweet,
):
if tweeter_id not in twitter_ids:
worth_posting = False
if include_reply_to_user:
if in_reply_to_twitter_id in twitter_ids:
worth_posting = True
else:
worth_posting = True
if not include_user_reply and in_reply_to_twitter_id is not None:
worth_posting = False
if not include_retweet:
if retweeted:
worth_posting = False
return worth_posting
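# Illustrative check (added comment, not part of the original source): a tweet
# authored by a followed id that is neither a reply nor a retweet is worth posting:
#   worth_posting_follow("123", ["123"], None, False, True, True, True) -> True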
def keyword_set_present(keyword_sets, text):
for keyword_set in keyword_sets:
keyword_present = [keyword.lower() in text.lower() for keyword in keyword_set]
keyword_set_present = all(keyword_present)
if keyword_set_present:
return True
return False
def blackword_set_present(blackword_sets, text):
if blackword_sets == [[""]]:
return False
for blackword_set in blackword_sets:
blackword_present = [blackword.lower() in text.lower() for blackword in blackword_set]
blackword_set_present = all(blackword_present)
if blackword_set_present:
return True
return False
class Processor:
def __init__(self, status_tweet, discord_config):
self.status_tweet = status_tweet
self.discord_config = discord_config
self.text = ""
self.url = ""
self.user = ""
self.embed = None
self.initialize()
def worth_posting_location(self):
if (
self.status_tweet.get("coordinates", None) is not None
and self.status_tweet["coordinates"].get("coordinates", None) is not None
):
coordinates = [self.status_tweet["coordinates"]["coordinates"]]
else:
coordinates = []
if (
self.status_tweet.get("place", None) is not None
and self.status_tweet["place"].get("bounding_box", None) is not None
and self.status_tweet["place"]["bounding_box"].get("coordinates", None) is not None
):
tmp = self.status_tweet["place"]["bounding_box"]["coordinates"]
else:
tmp = []
for (
tmp_
) in tmp: # for some reason Twitter API places the coordinates into a triple array.......
for c in tmp_:
coordinates.append(c)
return worth_posting_location(
location=self.discord_config.get("location", []),
coordinates=coordinates,
retweeted=self.status_tweet["retweeted"] or "retweeted_status" in self.status_tweet,
include_retweet=self.discord_config.get("IncludeRetweet", True),
)
def worth_posting_track(self):
if "extended_tweet" in self.status_tweet:
hashtags = sorted(
self.status_tweet["extended_tweet"]["entities"]["hashtags"],
key=lambda k: k["text"],
reverse=True,
)
else:
hashtags = sorted(
self.status_tweet["entities"]["hashtags"], key=lambda k: k["text"], reverse=True
)
return worth_posting_track(
track=self.discord_config.get("track", []),
hashtags=hashtags,
text=self.text,
retweeted=self.status_tweet["retweeted"] or "retweeted_status" in self.status_tweet,
include_retweet=self.discord_config.get("IncludeRetweet", True),
)
def worth_posting_follow(self):
return worth_posting_follow(
tweeter_id=self.status_tweet["user"]["id_str"],
twitter_ids=self.discord_config.get("twitter_ids", []),
in_reply_to_twitter_id=self.status_tweet["in_reply_to_user_id_str"],
retweeted=self.status_tweet["retweeted"] or "retweeted_status" in self.status_tweet,
include_reply_to_user=self.discord_config.get("IncludeReplyToUser", True),
include_user_reply=self.discord_config.get("IncludeUserReply", True),
include_retweet=self.discord_config.get("IncludeRetweet", True),
)
def initialize(self):
if "retweeted_status" in self.status_tweet:
if "extended_tweet" in self.status_tweet["retweeted_status"]:
self.text = self.status_tweet["retweeted_status"]["extended_tweet"]["full_text"]
elif "full_text" in self.status_tweet["retweeted_status"]:
self.text = self.status_tweet["retweeted_status"]["full_text"]
else:
self.text = self.status_tweet["retweeted_status"]["text"]
elif "extended_tweet" in self.status_tweet:
self.text = self.status_tweet["extended_tweet"]["full_text"]
elif "full_text" in self.status_tweet:
self.text = self.status_tweet["full_text"]
else:
self.text = self.status_tweet["text"]
for url in self.status_tweet["entities"].get("urls", []):
if url["expanded_url"] is None:
continue
self.text = self.text.replace(
url["url"], "[%s](%s)" % (url["display_url"], url["expanded_url"])
)
for userMention in self.status_tweet["entities"].get("user_mentions", []):
self.text = self.text.replace(
"@%s" % userMention["screen_name"],
"[@%s](https://twitter.com/%s)"
% (userMention["screen_name"], userMention["screen_name"]),
)
if "extended_tweet" in self.status_tweet:
for hashtag in sorted(
self.status_tweet["extended_tweet"]["entities"].get("hashtags", []),
key=lambda k: k["text"],
reverse=True,
):
self.text = self.text.replace(
"#%s" % hashtag["text"],
"[#%s](https://twitter.com/hashtag/%s)" % (hashtag["text"], hashtag["text"]),
)
for hashtag in sorted(
self.status_tweet["entities"].get("hashtags", []),
key=lambda k: k["text"],
reverse=True,
):
self.text = self.text.replace(
"#%s" % hashtag["text"],
"[#%s](https://twitter.com/hashtag/%s)" % (hashtag["text"], hashtag["text"]),
)
self.text = unescape(self.text)
self.url = "https://twitter.com/{}/status/{}".format(
self.status_tweet["user"]["screen_name"], self.status_tweet["id_str"]
)
self.user = self.status_tweet["user"]["name"]
def keyword_set_present(self):
return keyword_set_present(self.discord_config.get("keyword_sets", [[""]]), self.text)
def blackword_set_present(self):
return blackword_set_present(self.discord_config.get("blackword_sets", [[""]]), self.text)
def attach_field(self):
if self.discord_config.get("IncludeQuote", True) and "quoted_status" in self.status_tweet:
if self.status_tweet["quoted_status"].get("text"):
text = self.status_tweet["quoted_status"]["text"]
for url in self.status_tweet["quoted_status"]["entities"].get("urls", []):
if url["expanded_url"] is None:
continue
text = text.replace(
url["url"], "[%s](%s)" % (url["display_url"], url["expanded_url"])
)
for userMention in self.status_tweet["quoted_status"]["entities"].get(
"user_mentions", []
):
text = text.replace(
"@%s" % userMention["screen_name"],
"[@%s](https://twitter.com/%s)"
% (userMention["screen_name"], userMention["screen_name"]),
)
for hashtag in sorted(
self.status_tweet["quoted_status"]["entities"].get("hashtags", []),
key=lambda k: k["text"],
reverse=True,
):
text = text.replace(
"#%s" % hashtag["text"],
"[#%s](https://twitter.com/hashtag/%s)"
% (hashtag["text"], hashtag["text"]),
)
text = unescape(text)
self.embed.add_field(
name=self.status_tweet["quoted_status"]["user"]["screen_name"], value=text
)
def attach_media(self):
if (
self.discord_config.get("IncludeAttachment", True)
and "retweeted_status" in self.status_tweet
):
if (
"extended_tweet" in self.status_tweet["retweeted_status"]
and "media" in self.status_tweet["retweeted_status"]["extended_tweet"]["entities"]
):
for media in self.status_tweet["retweeted_status"]["extended_tweet"]["entities"][
"media"
]:
if media["type"] == "photo":
self.embed.set_image(url=media["media_url_https"])
elif media["type"] == "video":
pass
elif media["type"] == "animated_gif":
pass
if "media" in self.status_tweet["retweeted_status"]["entities"]:
for media in self.status_tweet["retweeted_status"]["entities"]["media"]:
if media["type"] == "photo":
self.embed.set_image(url=media["media_url_https"])
elif media["type"] == "video":
pass
elif media["type"] == "animated_gif":
pass
if (
"extended_entities" in self.status_tweet["retweeted_status"]
and "media" in self.status_tweet["retweeted_status"]["extended_entities"]
):
for media in self.status_tweet["retweeted_status"]["extended_entities"]["media"]:
if media["type"] == "photo":
self.embed.set_image(url=media["media_url_https"])
elif media["type"] == "video":
pass
elif media["type"] == "animated_gif":
pass
else:
if (
"extended_tweet" in self.status_tweet
and "media" in self.status_tweet["extended_tweet"]["entities"]
):
for media in self.status_tweet["extended_tweet"]["entities"]["media"]:
if media["type"] == "photo":
self.embed.set_image(url=media["media_url_https"])
elif media["type"] == "video":
pass
elif media["type"] == "animated_gif":
pass
if "media" in self.status_tweet["entities"]:
for media in self.status_tweet["entities"]["media"]:
if media["type"] == "photo":
self.embed.set_image(url=media["media_url_https"])
elif media["type"] == "video":
pass
elif media["type"] == "animated_gif":
pass
if (
"extended_entities" in self.status_tweet
and "media" in self.status_tweet["extended_entities"]
):
for media in self.status_tweet["extended_entities"]["media"]:
if media["type"] == "photo":
self.embed.set_image(url=media["media_url_https"])
elif media["type"] == "video":
pass
elif media["type"] == "animated_gif":
pass
def create_embed(self):
self.embed = Embed(
colour=random.choice(COLORS),
url="https://twitter.com/{}/status/{}".format(
self.status_tweet["user"]["screen_name"], self.status_tweet["id_str"]
),
title=self.status_tweet["user"]["name"],
description=self.text,
timestamp=datetime.strptime(
self.status_tweet["created_at"], "%a %b %d %H:%M:%S +0000 %Y"
),
)
self.embed.set_author(
name=self.status_tweet["user"]["screen_name"],
url="https://twitter.com/" + self.status_tweet["user"]["screen_name"],
icon_url=self.status_tweet["user"]["profile_image_url"],
)
self.embed.add_field(
name="Quick Links:",
value="**[Tweeter](https://twitter.com/home) | [OnlyDrops.in](https://www.instagram.com/onlydrops.in/) | [DEV](https://www.instagram.com/adityasanehi/)**",
            inline=True,
)
self.embed.set_footer(
text="PriceErrors v1.0 | HeavyDrops Profits",
icon_url="https://i.imgur.com/NeJAV1h.jpg",
)
def send_message(self, wh_url):
match = re.search(WH_REGEX, wh_url)
if match:
webhook = Webhook.partial(
int(match.group("id")), match.group("token"), adapter=RequestsWebhookAdapter()
)
try:
if self.discord_config.get("CreateEmbed", True):
webhook.send(
embed=self.embed,
content=self.discord_config.get("custom_message", "").format(
user=self.user, text=self.text, url=self.url
),
)
else:
webhook.send(
content=self.discord_config.get("custom_message", "").format(
user=self.user, text=self.text, url=self.url
)
)
except discord.errors.NotFound as error:
print(
f"---------Error---------\n"
f"discord.errors.NotFound\n"
f"The Webhook does not exist."
f"{error}\n"
f"-----------------------"
)
except discord.errors.Forbidden as error:
print(
f"---------Error---------\n"
f"discord.errors.Forbidden\n"
f"The authorization token of your Webhook is incorrect."
f"{error}\n"
f"-----------------------"
)
except discord.errors.InvalidArgument as error:
print(
f"---------Error---------\n"
f"discord.errors.InvalidArgument\n"
f"You modified the code. You can't mix embed and embeds."
f"{error}\n"
f"-----------------------"
)
except discord.errors.HTTPException as error:
print(
f"---------Error---------\n"
f"discord.errors.HTTPException\n"
f"Your internet connection is whack."
f"{error}\n"
f"-----------------------"
)
else:
print(
f"---------Error---------\n"
f"The following webhook URL is invalid:\n"
f"{wh_url}\n"
f"-----------------------"
)
if __name__ == "__main__":
    # minimal tweet fields (hypothetical values) so Processor.initialize() does not raise
    p = Processor({"text": "Hello World!", "entities": {},
                   "user": {"screen_name": "example", "name": "Example", "id_str": "0"},
                   "id_str": "0"}, {"keyword_sets": [[""]]})
p.text = "Hello World!"
print(p.keyword_set_present())
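    # Added illustration: exercise the blackword filter in the same smoke-test
    # style as above; "world" occurs in p.text, so this prints True.
    p.discord_config["blackword_sets"] = [["world"]]
    print(p.blackword_set_present())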
|
py | b40a9b50a8a2afa53de282feb163d22637af2729 | VISUALIZE = False # Enable several visualizatios
force = False # Force flow extractions (repeat over all directories)
video_trick = True # Set to true if you want to remove the drift from video features
data_normalization = True # Set to true to perform data normalization
filter_vis = False # Set to true if you want to plot the filter PSD
gait_macro_parmeters = False # Set to true if you want to plot the gait macro parameters
preproc_data_inspection = False # Set to true to produce pdf for visualization of preprocessed data.
|
py | b40a9b57a650b002dd41c95a62b12721e86fecc4 | #!/usr/bin/env python
"""
Script to gracefully restart process after failure.
Makes the XML extraction process tolerant to views that require data.
1) Listen to stdin
2) Catch when a failure occurs in the APK we're testing
3) Resend an implicit intent when a failure occurs
4) Determine when we're done running the app
Author: Akhil Acharya & Ben Andow
Date: September 3, 2015
Updated: February 21, 2018
"""
import os
import sys
import subprocess
import re
import time
import codecs
import signal
#from adb import adb_commands
#from adb import sign_m2crypto
from sys import platform
if platform == "linux" or platform == "linux2":
TIMEOUTCMD = "timeout"
elif platform == "darwin":
TIMEOUTCMD = "gtimeout"
LOGCAT_TIMEOUT = 60 # 60 second timeout
# Timeout if we receive no input from logcat...
def logcatTimeout(signum, frame):
raise Exception('Timeout reached without receiving logcat output.')
class UiRefRenderer:
def __init__(self, adb="adb", rsaKeyPath="~/.android/adbkey", emulatorIpAddr="192.168.56.101:5555"):
self.adb = adb
self.emulatorIpAddr = emulatorIpAddr
#self.rsaKeys = sign_m2crypto.M2CryptoSigner(os.path.expanduser(rsaKeyPath))
#self.connectToDevice()
############################# CONSTANTS #############################
self.LOGCAT_WARNING = "W"
self.LOGCAT_INFO = "I"
self.LOGCAT_GUIRIPPER_TAG = "GuiRipper"
self.LOGCAT_AMS_TAG = "ActivityManager"
self.RENDER_MSG = "Rendering"
self.SCREENDUMP_EXCEPT_MSG = "ScreendumpException"
self.RENDER_EXCEPT_MSG = "RenderingException"
self.RENDER_FAIL_MSG = "RenderingFailure"
self.RENDER_DONE_MSG = "RenderingComplete"
######################################################################
################## Regex patterns for parsing logcat ##################
#TODO rewrite logcat patterns...
#Regex pattern for extracting message
# e.g., <PRIORITY>/<TAG>( <PID>): Rendering(<LAYOUT_ID>):<COUNTER>/<TOTAL>
self.logcat_pattern = re.compile("""
(?P<priority>.*?)\\/
(?P<tag>.*?)
\\(\s*(?P<pid>.*?)\\)\\:\s*
(?P<message>.*)
""", re.VERBOSE)
#Regex pattern for extracting rendering status and exceptions
# <PRIORITY>/GuiRipper( <PID>): <MESSAGE>(<LAYOUT_ID>):<COUNTER>/<TOTAL>
# e.g., W/GuiRipper( 6807): Rendering(2130903065):2/377
# W/GuiRipper( 6807): ScreendumpException(2130903112):38/377
# W/GuiRipper( 6807): RenderingException(2130903153):78/377
# W/GuiRipper( 6983): RenderingFailure(2130903217):141/377
# W/GuiRipper( 6983): RenderingComplete(0):377/377
self.render_log_pattern = re.compile("""
(?P<output_message>.*?)
\\((?P<layout_id>.*?)\\)\\:
(?P<layout_counter>.*?)\\/
(?P<layout_total>.*)
""", re.VERBOSE)
#Regex pattern for detecting failures
# W/ActivityManager( <PID>): Force finishing activity 1 <APK_NAME>/<ACTIVITY>"
# e.g., W/ActivityManager( 748): Force finishing activity 1 com.test.test/com.benandow.android.gui.layoutRendererApp.GuiRipperActivity
self.ams_log_failure_pattern = re.compile("""
Force\\s+finishing\\s+activity\\s*\\d*\\s+
(?P<package_name>.*?)
\\/(?P<activity_name>.*)
""", re.VERBOSE)
#I/ActivityManager( 727): Process com.viewspection.internaluseonly (pid 30600) has died
self.ams_log_failure_pattern2 = re.compile("""
Process\\s+
(?P<package_name>.*?)
\\s+\\(pid\\s+(?P<pid>.*?)\\)\\s+has\\s+died
""", re.VERBOSE)
#Regex pattern for detecting failures
# W/ActivityManager( <PID>): Activity stop timeout for ActivityRecord{<HEXNUM> <NUM/LETTERS> <APK_NAME/<ACTIVITY> <NUM/LETTERS>}
# e.g., W/ActivityManager( 587): Activity stop timeout for ActivityRecord{28d356a u0 com.test.test/com.benandow.android.gui.layoutRendererApp.GuiRipperActivity t293}
self.ams_log_failure_pattern3 = re.compile("""
Activity\\s+stop\\s+timeout\\s+for\\s+
ActivityRecord{[a-zA-Z0-9]+\\s+[a-zA-Z0-9]+\\s+
(?P<package_name>.*?)\\/(?P<activity_name>.*)\\s+[a-zA-Z0-9]+}
""", re.VERBOSE)
######################################################################
#def connectToDevice(self):
# self.device = adb_commands.AdbCommands.ConnectDevice(serial=self.emulatorIpAddr, rsa_keys=[self.rsaKeys])
def invokeAdbCommandWithTimeout(self, cmd, timeoutSeconds=u'180'):
cmd.insert(0, timeoutSeconds)
cmd.insert(0, TIMEOUTCMD)
for i in xrange(0,3): # Repeat 3 times max...
res = subprocess.call(cmd)
if res != 124: # Does not equal value returned from timeout command
break
def installApk(self, apk, timeoutMs=3000):
# The python lib doesn't support the -g option to allow all runtime permissions...
self.invokeAdbCommandWithTimeout([self.adb, "-s", self.emulatorIpAddr, "install", "-g", "-r", apk])
# print self.device.Install(apk_path=apk, timeout_ms=timeoutMs)
def checkInstallSucess(self, packageName, timeoutMs=3000):
for i in xrange(0, 3):
popenObj = subprocess.Popen([self.adb, "-s", self.emulatorIpAddr, "shell", "pm", "list", "packages"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out,err = popenObj.communicate()
if popenObj.returncode != 124:
for pkg in out.split('\n'):
if pkg is None or len(pkg.strip()) == 0:
continue
pkg = re.sub(r'^package:', u'', pkg.strip())
if pkg == packageName:
return True
return False
def uninstallApk(self, packageName, timeoutMs=3000):
#self.device.Uninstall(package_name=packageName, timeout_ms=timeoutMs)
self.invokeAdbCommandWithTimeout([self.adb, "-s", self.emulatorIpAddr, "uninstall", packageName])
def clearLogcat(self):
self.invokeAdbCommandWithTimeout([self.adb, "-s", self.emulatorIpAddr, "logcat", "-c"])
def forceStop(self, packageName):
# The python lib doesn't support multiple commands yet and this may be called in the Logcat loop, so spawn a process instead
# self.device.Shell("am force-stop %s" % (packageName,))
self.invokeAdbCommandWithTimeout([self.adb, "-s", self.emulatorIpAddr, "shell", "am", "force-stop", packageName])
def startRendering(self, packageName, forceStop=False):
if forceStop:
self.forceStop(packageName)
# The python lib doesn't support multiple commands yet and this may be called in the Logcat loop, so spawn a process instead
#self.device.Shell("am start -a %s.GuiRipper" % (packageName,))
self.invokeAdbCommandWithTimeout([self.adb, "-s", self.emulatorIpAddr, "shell", "am", "start", "-a", "%s.GuiRipper" % (packageName,)])
def pullFile(self, devicePath, outputPath):
# For some reason, device.Pull tries to convert a bytearray to a string, which clearly breaks for images...
#outputFile = codecs.open(outputPath, 'wb', 'utf-8')
#self.device.Pull(device_filename=devicePath, dest_file=outputFile)
print "adb pull", devicePath, outputPath
self.invokeAdbCommandWithTimeout([self.adb, "-s", self.emulatorIpAddr, "pull", devicePath, outputPath])
def wipeFiles(self, fileStr):
# This is extremely buggy for some reason, just revert to adb for now...
#self.device.Shell("rm %s" % (fileStr,))
self.invokeAdbCommandWithTimeout([self.adb, "-s", self.emulatorIpAddr, "shell", "rm", fileStr])
def readFiles(self, directory='/sdcard/'):
# This is also extremely buggy for some reason, just revert to adb for now...
# return [ fileinfo[0].decode() for fileinfo in self.device.List("/sdcard") ]
for i in xrange(0, 3):
popenObj = subprocess.Popen([self.adb, "-s", self.emulatorIpAddr, "shell", "ls", directory], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out,err = popenObj.communicate()
if popenObj.returncode != 124:
return [ line.strip() for line in out.split('\n') if line is not None and len(line.strip()) > 0 and (line.strip().endswith('.xml') or line.strip().endswith('.png')) ]
return []
#TODO duplication method... refactor
@staticmethod
def ensureDirExists(path):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
def extractData(self, resultDirectory, packageName):
# Make layouts output directory
layoutsOutputDirectory = os.path.join(*[resultDirectory, packageName, "layouts/"])
UiRefRenderer.ensureDirExists(layoutsOutputDirectory)
# Make screenshots output directory
screenshotsOutputDirectory = os.path.join(*[resultDirectory, packageName, "screenshots/"])
UiRefRenderer.ensureDirExists(screenshotsOutputDirectory)
# For some reason after reading Logcat, the adb connection dies so let's reconnect
#self.connectToDevice()
for filename in self.readFiles(directory='/sdcard/'):
if filename.endswith(".xml"):
self.pullFile(os.path.join("/sdcard/", filename), os.path.join(layoutsOutputDirectory, filename))
elif filename.endswith(".png"):
self.pullFile(os.path.join("/sdcard/", filename), os.path.join(screenshotsOutputDirectory, filename))
self.wipeFiles("/sdcard/*.xml")
self.wipeFiles("/sdcard/*.png")
self.wipeFiles("/sdcard/*.txt")
def startLogcat(self):
# self.device.Logcat("-v brief") is also buggy
return subprocess.Popen([self.adb, "-s", self.emulatorIpAddr, 'logcat', '-v', 'brief'], shell=False, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
def startMonitoring(self, package, logFile, maxTimeoutRetries=10, maxFailureRetries=10):
self.clearLogcat()
self.startRendering(package)
        process = None  # handle to the logcat subprocess, referenced in the except block below
numFailures = 0
timeoutRetryCount = 0
while True: # Outer loop for restarting due to timeouts...
if timeoutRetryCount > maxTimeoutRetries:
logFile.write(u'Hit max number of retries for {}\n'.format(package))
print 'Hit max number of retries for', package
return
try:
self.clearLogcat()
process = self.startLogcat()
signal.alarm(LOGCAT_TIMEOUT) # Start the timer
for line in iter(process.stdout.readline, ''):
                    if numFailures > maxFailureRetries:
                        logFile.write(u'Exceeded number of failures for {}\n'.format(package))
print 'Exceeded number of failures for', package
return
if not line:
continue
# Parse logcat line
logcat_line_match = self.logcat_pattern.match(line)
if not logcat_line_match:
continue
priority = logcat_line_match.group("priority")
tag = logcat_line_match.group("tag")
logcat_message = logcat_line_match.group("message")
#If logging message from GUIRipper component
if priority == self.LOGCAT_WARNING and tag == self.LOGCAT_GUIRIPPER_TAG:
signal.alarm(0) # Found a relevant line, disable alarm...
timeoutRetryCount = 0
render_log_match = self.render_log_pattern.match(logcat_message)
if not render_log_match:
signal.alarm(LOGCAT_TIMEOUT) # Start the timer
continue
output_msg = render_log_match.group("output_message")
layout_id = render_log_match.group("layout_id")
layout_counter = render_log_match.group("layout_counter")
layout_total = render_log_match.group("layout_total")
numFailures = 0
if output_msg == self.RENDER_MSG:
logFile.write(u'Rendering {} {}/{}\n'.format(layout_id, layout_counter, layout_total))
print "Rendering "+layout_id+" "+layout_counter+"/"+layout_total
elif output_msg == self.RENDER_DONE_MSG:
##START NEXT APK
logFile.write(u'Finished {} {}/{}\n'.format(layout_id, layout_counter, layout_total))
print "Finished "+layout_id+" "+layout_counter+"/"+layout_total
return
elif output_msg == self.SCREENDUMP_EXCEPT_MSG:
                            # Log screenshot failure
logFile.write(u'Screenshot failure {} {}/{}\n'.format(layout_id, layout_counter, layout_total))
print "Screenshot failure "+layout_id+" "+layout_counter+"/"+layout_total
elif output_msg == self.RENDER_EXCEPT_MSG:
#Log failure
logFile.write(u'Rendering Exception {} {}/{}\n'.format(layout_id, layout_counter, layout_total))
print "Rendering Exception "+layout_id+" "+layout_counter+"/"+layout_total
elif output_msg == self.RENDER_FAIL_MSG:
#Log the failure
logFile.write(u'Failure {} {}/{}\n'.format(layout_id, layout_counter, layout_total))
print "Failure "+layout_id+" "+layout_counter+"/"+layout_total
#Else if logging message from AMS (i.e., failure occurs)
elif priority == self.LOGCAT_WARNING and tag == self.LOGCAT_AMS_TAG:
ams_log_fail_match = self.ams_log_failure_pattern.match(logcat_message)
if not ams_log_fail_match:
ams_log_fail_match = self.ams_log_failure_pattern3.match(logcat_message)
if not ams_log_fail_match:
continue
signal.alarm(0) # Found a relevant line, disable alarm...
timeoutRetryCount = 0
package_name = ams_log_fail_match.group("package_name")
activity_name = ams_log_fail_match.group("activity_name")
logFile.write(u'Failure occurred ({}) for package {}. Restarting rendering\n'.format(numFailures, package))
print "Failure occurred: restart rendering", numFailures, package
numFailures += 1
self.startRendering(package, forceStop=True)
elif priority == self.LOGCAT_INFO and tag == self.LOGCAT_AMS_TAG:
ams_log_fail_match2 = self.ams_log_failure_pattern2.match(logcat_message)
if not ams_log_fail_match2:
continue
signal.alarm(0) # Found a relevant line, disable alarm...
timeoutRetryCount = 0
package_name = ams_log_fail_match2.group("package_name")
logFile.write(u'Failure occurred ({}) for package {}. Restarting rendering\n'.format(numFailures, package))
print "Failure occurred: restart rendering", numFailures
numFailures += 1
self.startRendering(package, forceStop=True)
# Reset the timer...
signal.alarm(LOGCAT_TIMEOUT) # Reset the timer...
except:
logFile.write(u'Timeout occurred for package {}. Restarting rendering\n'.format(package))
timeoutRetryCount += 1
if process is not None: # Kill the logcat listener instance
process.kill()
self.startRendering(package, forceStop=True)
#TODO duplication method... refactor
@staticmethod
def getAppName(apkPath):
basename = os.path.basename(apkPath)
return os.path.splitext(basename)[0] if apkPath.endswith(".apk") else basename
@staticmethod
def stripVersionNumber(appName):
return re.sub("\-[0-9]+$", "", appName)
### Main entrypoint for ripping GUIs from APK
def ripGUIs(self, apkPath, outputPath):
packageName = UiRefRenderer.getAppName(apkPath)
packageNameWoVersion = UiRefRenderer.stripVersionNumber(packageName)
# If we already processed the layout, skip
if os.path.exists(os.path.join(outputPath, packageName)):
return
# Uninstall first if already installed...
preinstalled = False
if self.checkInstallSucess(packageNameWoVersion):
self.uninstallApk(packageNameWoVersion)
preinstalled = True
UiRefRenderer.ensureDirExists(outputPath)
self.installApk(apkPath)
logFile = codecs.open(os.path.join(outputPath, u'{}.log'.format(packageName)), 'w', 'utf-8')
if not self.checkInstallSucess(packageNameWoVersion):
logFile.write(u'Install failed for {}\n'.format(packageName))
print 'Install failed...', packageName
return
self.startMonitoring(packageNameWoVersion, logFile=logFile)
self.forceStop(packageNameWoVersion)
self.extractData(outputPath, packageName)
if not preinstalled:
self.uninstallApk(packageNameWoVersion)
############################## Testing ###############################
#line ="W/GuiRipper( 6807): Rendering(2130903065):2/377"
#line ="W/GuiRipper( 6807): ScreendumpException(2130903112):38/377"
#line ="W/GuiRipper( 6807): RenderingException(2130903153):78/377"
#line ="W/GuiRipper( 6983): RenderingFailure(2130903217):141/377"
#line ="W/GuiRipper( 6983): RenderingComplete(0):377/377"
#line = "W/ActivityManager( 748): Force finishing activity 1 com.test.test/com.benandow.android.gui.layoutRendererApp.GuiRipperActivity"
#line = "W/ActivityManager( 587): Activity stop timeout for ActivityRecord{28d356a u0 com.test.test/com.benandow.android.gui.layoutRendererApp.GuiRipperActivity t293}"
def regex_test(self, line):
logcat_line_match = self.logcat_pattern.match(line)
priority = logcat_line_match.group("priority")
tag = logcat_line_match.group("tag")
message = logcat_line_match.group("message")
print "PRIORITY = %s\nTAG = %s\nMESSAGE = %s\n\n" % (priority, tag, message)
if priority == self.LOGCAT_WARNING and tag == self.LOGCAT_GUIRIPPER_TAG:
render_log_match = self.render_log_pattern.match(message)
output_msg = render_log_match.group("output_message")
layout_id = render_log_match.group("layout_id")
layout_counter = render_log_match.group("layout_counter")
layout_total = render_log_match.group("layout_total")
print "MSG = %s\nLAYOUT ID = %s\nCOUNTER=%s\nTOTAL=%s\n\n" % (output_msg, layout_id, layout_counter, layout_total)
elif priority == self.LOGCAT_WARNING and tag == self.LOGCAT_AMS_TAG:
ams_log_fail_match = self.ams_log_failure_pattern.match(message)
if not ams_log_fail_match:
ams_log_fail_match = self.ams_log_failure_pattern3.match(message)
package_name = ams_log_fail_match.group("package_name")
activity_name = ams_log_fail_match.group("activity_name")
print "PACKAGE = %s\nACTIVITY = %s\n\n" % (package_name, activity_name)
elif priority == self.LOGCAT_INFO and tag == self.LOGCAT_AMS_TAG:
ams_log_fail_match2 = self.ams_log_failure_pattern2.match(message)
package_name = ams_log_fail_match2.group("package_name")
print "PACKAGE_FAIL = %s\n" % (package_name,)
######################################################################
def main(adbLocation, emulatorIpAddr, apkName, resultsDirectory):
signal.signal(signal.SIGALRM, logcatTimeout) # Register our alarm...
UiRefRenderer(adb=adbLocation, emulatorIpAddr=emulatorIpAddr).ripGUIs(apkName, resultsDirectory)
def mainWalk(adbLocation, emulatorIpAddr, apksLocation, resultsDirectory):
for root,dirnames,files in os.walk(apksLocation):
for filename in files:
if filename.endswith('.apk'):
main(adbLocation, emulatorIpAddr, os.path.join(root, filename), resultsDirectory)
if __name__ == '__main__':
if len(sys.argv) < 5:
        print "Usage: %s <adbLocation> <emulatorIpAddr> <apkName> <resultsDirectory>" % sys.argv[0]
sys.exit(1)
mainWalk(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
|
py | b40a9c8ba1b15c01ce4c1301c01da950eb41ea95 | """
Methods for clustering localization data in LocData objects.
"""
import sys
from copy import copy, deepcopy
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
from locan import HAS_DEPENDENCY, needs_package
if HAS_DEPENDENCY["hdbscan"]:
from hdbscan import HDBSCAN
from locan.configuration import N_JOBS
from locan.data.locdata import LocData
__all__ = ["cluster_hdbscan", "cluster_dbscan"]
@needs_package("hdbscan")
def cluster_hdbscan(
locdata,
min_cluster_size=5,
loc_properties=None,
allow_single_cluster=False,
**kwargs
):
"""
Cluster localizations in locdata using the hdbscan clustering algorithm.
Parameters
----------
locdata : LocData
specifying the localization data on which to perform the manipulation.
loc_properties : list of string, None
The LocData properties to be used for clustering. If None, locdata.coordinates will be used.
min_cluster_size : int
        minimum cluster size in the HDBSCAN algorithm (default: 5)
allow_single_cluster : bool
        allow returning a single cluster (default: False)
kwargs : dict
Other parameters passed to `hdbscan.HDBSCAN`.
Returns
-------
tuple (LocData, LocData)
A tuple with noise and cluster.
The first LocData object is a selection of all localizations that are defined as noise,
in other words all localizations that are not part of any cluster.
The second LocData object is a LocData instance assembling all generated selections (i.e. localization cluster).
"""
parameter = locals()
if len(locdata) == 0:
locdata_noise = LocData()
collection = LocData()
if len(locdata) < min_cluster_size:
locdata_noise = copy(locdata)
collection = LocData()
else:
if loc_properties is None:
fit_data = locdata.coordinates
else:
fit_data = locdata.data[loc_properties]
labels = HDBSCAN(
min_cluster_size=min_cluster_size,
allow_single_cluster=allow_single_cluster,
gen_min_span_tree=False,
**kwargs
).fit_predict(fit_data)
grouped = locdata.data.groupby(labels)
locdata_index_labels = [
locdata.data.index[idxs] for idxs in grouped.indices.values()
]
selections = [
LocData.from_selection(locdata=locdata, indices=idxs)
for idxs in locdata_index_labels
]
try:
grouped.get_group(-1)
locdata_noise = selections[0]
collection = LocData.from_collection(selections[1:])
except KeyError:
locdata_noise = None
collection = LocData.from_collection(selections)
# set regions
if locdata_noise:
locdata_noise.region = locdata.region
if collection:
collection.region = locdata.region
# metadata
if locdata_noise:
del locdata_noise.meta.history[:]
locdata_noise.meta.history.add(
name=sys._getframe().f_code.co_name, parameter=str(parameter)
)
del collection.meta.history[:]
collection.meta.history.add(
name=sys._getframe().f_code.co_name, parameter=str(parameter)
)
return locdata_noise, collection
def cluster_dbscan(locdata, eps=20, min_samples=5, loc_properties=None, **kwargs):
"""
Cluster localizations in locdata using the dbscan clustering algorithm as implemented in sklearn.
Parameters
----------
locdata : LocData
specifying the localization data on which to perform the manipulation.
eps : float
The maximum distance between two samples for them to be considered as in the same neighborhood.
min_samples : int
The number of samples in a neighborhood for a point to be considered as a core point.
This includes the point itself.
loc_properties : list of string, None
The LocData properties to be used for clustering. If None, locdata.coordinates will be used.
kwargs : dict
Other parameters passed to `sklearn.cluster.DBSCAN`.
Returns
-------
tuple (LocData, LocData)
A tuple with noise and cluster.
The first LocData object is a selection of all localizations that are defined as noise,
in other words all localizations that are not part of any cluster.
The second LocData object is a LocData instance assembling all generated selections (i.e. localization cluster).
"""
parameter = locals()
if len(locdata) == 0:
locdata_noise = LocData()
collection = LocData()
else:
if loc_properties is None:
fit_data = locdata.coordinates
else:
fit_data = locdata.data[loc_properties]
labels = DBSCAN(
eps=eps, min_samples=min_samples, n_jobs=N_JOBS, **kwargs
).fit_predict(fit_data)
grouped = locdata.data.groupby(labels)
locdata_index_labels = [
locdata.data.index[idxs] for idxs in grouped.indices.values()
]
selections = [
LocData.from_selection(locdata=locdata, indices=idxs)
for idxs in locdata_index_labels
]
try:
grouped.get_group(-1)
locdata_noise = selections[0]
collection = LocData.from_collection(selections[1:])
except KeyError:
locdata_noise = None
collection = LocData.from_collection(selections)
# set regions
if locdata_noise:
locdata_noise.region = locdata.region
if collection:
collection.region = locdata.region
# metadata
if locdata_noise:
del locdata_noise.meta.history[:]
locdata_noise.meta.history.add(
name=sys._getframe().f_code.co_name, parameter=str(parameter)
)
del collection.meta.history[:]
collection.meta.history.add(
name=sys._getframe().f_code.co_name, parameter=str(parameter)
)
return locdata_noise, collection
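# --- Hedged usage sketch (added for illustration; not part of the module) ---
# Intended call pattern for cluster_dbscan on two well-separated blobs. Building
# the input with LocData.from_dataframe and the coordinate column names
# 'position_x' / 'position_y' are assumptions about the wider locan API.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=1)
    points = np.vstack([rng.normal(0, 1, size=(50, 2)),
                        rng.normal(100, 1, size=(50, 2))])
    frame = pd.DataFrame(points, columns=["position_x", "position_y"])
    example_locdata = LocData.from_dataframe(dataframe=frame)
    noise, clusters = cluster_dbscan(example_locdata, eps=5, min_samples=3)
    # With these parameters every point should be clustered, so noise is None
    # and the collection should hold two clusters.
    print(noise, len(clusters))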
|
py | b40a9d11c8cc19e0217c95787c65f69db4ad9021 | # -*- coding: utf-8 -*-
__author__ = 'Jian'
import log
try:
import requests
except ImportError:
log.logger.warn('requests module not found.')
try:
from bs4 import BeautifulSoup
except ImportError:
log.logger.warn("bs module not found.")
import sys
import os
import time
from subprocess import Popen
import ConfigParser
reload(sys)
sys.setdefaultencoding('utf-8')
'''http://user.jumpss.com/user/register.php'''
class RegSS():
def __init__(self, main_url, nick, email, passwd):
self.main_url = main_url
self.name = nick
self.email = email
self.passwd = passwd
self.keys = ''
self.reg = '/_reg.php'
self.session = requests.session()
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
'Host': 'user.jumpss.com',
'Origin': 'http://user.jumpss.com',
'Connection': 'keep-alive',
'Referer': 'http://user.jumpss.com/user/register.php',
'Content-Type': 'application/x-www-form-urlencoded',
'X-Requested-With': 'XMLHttpRequest'
}
def getCaptcha(self):
        log.logger.info("Start getting captcha")
timsstamp = str(int(time.time() * 1000))
captcha_URL = 'http://user.jumpss.com/authnum.php?1'
print captcha_URL
captcha = self.session.get(url=captcha_URL, headers=self.headers)
if not os.path.exists("captcha"):
os.mkdir("captcha")
# save captcha
# @sys.path[0]
try:
with open("captcha\\" + timsstamp + '.png', 'wb') as f:
f.write(captcha.content)
Popen(sys.path[0] + "\\captcha\\" + timsstamp + '.png', shell=True)
except:
            raise Exception('[!] Captcha fetch failed, error from method getCaptcha().')
self.keys = str(raw_input("[+]input captcha:"))
log.logger.info("[+]The captcha is :%s", self.keys)
def reg1(self):
        log.logger.info("Start registering a new user")
r2 = self.session.post(url=(self.main_url + self.reg),
data=dict(email=self.email, name=self.name, passwd=self.passwd, repasswd=self.passwd,
code='', keys=self.keys, invitee='')
)
if 'ok' in str(r2.text):
log.logger.info("register success")
log.logger.info('register email:%s , passwd:%s' % (self.email, self.passwd))
            # For converting unicode escapes to Chinese text, see http://windkeepblow.blog.163.com/blog/static/1914883312013988185783/
log.logger.info("register info:%s", r2.text.decode("unicode_escape"))
log.logger.info("register finished")
return True
else:
log.logger.info('[!]register failed.')
            # For converting unicode escapes to Chinese text, see http://windkeepblow.blog.163.com/blog/static/1914883312013988185783/
log.logger.info("register info:%s", r2.text.decode("unicode_escape"))
log.logger.info("register finished")
return False
def main(config_file_path):
cf = ConfigParser.ConfigParser()
cf.read(config_file_path)
mainUrl = cf.get("registerInfo", "mainUrl")
nick = cf.get("registerInfo", "nick")
passwd = cf.get("registerInfo", "passwd")
email = cf.get("registerInfo", "email")
r = RegSS(mainUrl, nick, email, passwd)
r.getCaptcha()
if r.reg1():
        # Write the newly registered account back to the config file
# cf.add_section("TestConfigParser")
rawUserLists = cf.get("loginGeneralSS", "userlists")
cf.set("loginGeneralSS", "userlists", rawUserLists + "|" + r.email + "&" + r.passwd)
cf.write(open("config.ini", "wb"))
else:
pass
###################Test##########################
if __name__ == '__main__':
main('config.ini')
|
py | b40a9db9a876d2249faa1f68b871f2f8b143f5c2 | from tkinter import *
from time import localtime
class Horas:
def __init__(self,raiz):
self.canvas=Canvas(raiz, width=200, height=100)
self.canvas.pack()
self.frame=Frame(raiz)
self.frame.pack()
        self.altura = 100 # Canvas height
        # Clock face drawing -----------------------------
pol=self.canvas.create_polygon
ret=self.canvas.create_rectangle
self.texto=self.canvas.create_text
self.fonte=('BankGothic Md BT','20','bold')
pol(10, self.altura-10, 40, self.altura-90, 160, self.altura-90, 190, self.altura-10, fill='darkblue')
pol(18, self.altura-15, 45, self.altura-85, 155, self.altura-85, 182, self.altura-15, fill='dodgerblue')
ret(45, self.altura-35, 90, self.altura-60, fill='darkblue', outline='')
ret(110, self.altura-35, 155, self.altura-60, fill='darkblue', outline='')
self.texto(100, self.altura-50, text=':', font=self.fonte, fill='yellow')
        # End of clock face drawing -----------------------
self.mostrar=Button(self.frame, text='Que horas são?', command=self.mostra, font=('Comic Sans MS', '11', 'bold'), fg='darkblue', bg='deepskyblue')
self.mostrar.pack(side=LEFT)
def mostra(self):
self.canvas.delete('digitos_HORA')
self.canvas.delete('digitos_MIN')
HORA = str( localtime()[3] )
MINUTO = str( localtime()[4] )
self.texto(67.5, self.altura-50, text=HORA, fill='yellow',
font=self.fonte, tag='digitos_HORA')
self.texto(132.5, self.altura-50, text=MINUTO, fill='yellow', font=self.fonte, tag='digitos_MIN')
if __name__ == '__main__':
instancia=Tk()
Horas(instancia)
instancia.mainloop()
|
py | b40a9e267b367b433663a1574efdac534b59617a | """(disabled by default) support for testing pytest and pytest plugins."""
from __future__ import absolute_import, division, print_function
import codecs
import gc
import os
import platform
import re
import subprocess
import six
import sys
import time
import traceback
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
from _pytest.capture import MultiCapture, SysCapture
from _pytest._code import Source
import py
import pytest
from _pytest.main import Session, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook
from _pytest.compat import Path
IGNORE_PAM = [ # filenames added when obtaining details about the current user
u"/var/lib/sss/mc/passwd"
]
def pytest_addoption(parser):
parser.addoption(
"--lsof",
action="store_true",
dest="lsof",
default=False,
help=("run FD checks if lsof is available"),
)
parser.addoption(
"--runpytest",
default="inprocess",
dest="runpytest",
choices=("inprocess", "subprocess"),
help=(
"run pytest sub runs in tests using an 'inprocess' "
"or 'subprocess' (python -m main) method"
),
)
parser.addini(
"pytester_example_dir", help="directory to take the pytester example files from"
)
def pytest_configure(config):
if config.getvalue("lsof"):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
class LsofFdLeakChecker(object):
def get_open_files(self):
out = self._exec_lsof()
open_files = self._parse_lsof_output(out)
return open_files
def _exec_lsof(self):
pid = os.getpid()
return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
def _parse_lsof_output(self, out):
def isopen(line):
return line.startswith("f") and (
"deleted" not in line
and "mem" not in line
and "txt" not in line
and "cwd" not in line
)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split("\0")
fd = fields[0][1:]
filename = fields[1][1:]
if filename in IGNORE_PAM:
continue
if filename.startswith("/"):
open_files.append((fd, filename))
return open_files
def matching_platform(self):
try:
py.process.cmdexec("lsof -v")
except (py.process.cmdexec.Error, UnicodeDecodeError):
# cmdexec may raise UnicodeDecodeError on Windows systems with
# locale other than English:
# https://bitbucket.org/pytest-dev/py/issues/66
return False
else:
return True
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item):
lines1 = self.get_open_files()
yield
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = []
error.append("***** %s FD leakage detected" % len(leaked_files))
error.extend([str(f) for f in leaked_files])
error.append("*** Before:")
error.extend([str(f) for f in lines1])
error.append("*** After:")
error.extend([str(f) for f in lines2])
error.append(error[0])
error.append("*** function %s:%s: %s " % item.location)
error.append("See issue #2366")
item.warn("", "\n".join(error))
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
"python2.7": r"C:\Python27\python.exe",
"python3.4": r"C:\Python34\python.exe",
"python3.5": r"C:\Python35\python.exe",
"python3.6": r"C:\Python36\python.exe",
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
import subprocess
popen = subprocess.Popen(
[str(executable), "--version"],
universal_newlines=True,
stderr=subprocess.PIPE,
)
out, err = popen.communicate()
if name == "jython":
if not err or "2.5" not in err:
executable = None
if "2.5.2" in err:
executable = None # http://bugs.jython.org/issue1790
elif popen.returncode != 0:
# handle pyenv's 127
executable = None
cache[name] = executable
return executable
@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
def anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
pytest.skip("no suitable %s found" % (name,))
return executable
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
hooks.
"""
return PytestArg(request)
class PytestArg(object):
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(values):
"""Only return names from iterator values without a leading underscore."""
return [x for x in values if x[0] != "_"]
class ParsedCall(object):
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d["_name"]
return "<ParsedCall %r(**%r)>" % (self._name, d)
class HookRecorder(object):
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording each call
before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print("CHECKERMATCH", repr(check), "->", call)
else:
print("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % str(x) for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
values = self.getcalls(name)
assert len(values) == 1, (name, values)
return values[0]
# functionality for test reports
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(
self,
inamepart="",
names="pytest_runtest_logreport pytest_collectreport",
when=None,
):
"""return a testreport whose dotted import path matches"""
values = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, "when", None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
values.append(rep)
if not values:
raise ValueError(
"could not find test report matching %r: "
"no test reports at all!" % (inamepart,)
)
if len(values) > 1:
raise ValueError(
"found 2 or more testreports matching %r: %s" % (inamepart, values)
)
return values[0]
def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures("pytest_collectreport")
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
@pytest.fixture
def linecomp(request):
return LineComp()
@pytest.fixture(name="LineMatcher")
def LineMatcher_fixture(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
rex_outcome = re.compile(r"(\d+) ([\w-]+)")
class RunResult(object):
"""The result of running a command.
Attributes:
:ret: the return value
:outlines: list of lines captured from stdout
    :errlines: list of lines captured from stderr
:stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
method
:stderr: :py:class:`LineMatcher` of stderr
:duration: duration in seconds
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
"""Return a dictionary of outcomestring->num from parsing the terminal
output that the test process produced.
"""
for line in reversed(self.outlines):
if "seconds" in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
raise ValueError("Pytest terminal report not found")
def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0):
"""Assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run.
"""
d = self.parseoutcomes()
obtained = {
"passed": d.get("passed", 0),
"skipped": d.get("skipped", 0),
"failed": d.get("failed", 0),
"error": d.get("error", 0),
}
assert obtained == dict(
passed=passed, skipped=skipped, failed=failed, error=error
)
class CwdSnapshot(object):
def __init__(self):
self.__saved = os.getcwd()
def restore(self):
os.chdir(self.__saved)
class SysModulesSnapshot(object):
def __init__(self, preserve=None):
self.__preserve = preserve
self.__saved = dict(sys.modules)
def restore(self):
if self.__preserve:
self.__saved.update(
(k, m) for k, m in sys.modules.items() if self.__preserve(k)
)
sys.modules.clear()
sys.modules.update(self.__saved)
class SysPathsSnapshot(object):
def __init__(self):
self.__saved = list(sys.path), list(sys.meta_path)
def restore(self):
sys.path[:], sys.meta_path[:] = self.__saved
class Testdir(object):
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of methods
which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
methods will use :py:attr:`tmpdir` as their current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but plugins can
be added to the list. The type of items to add to the list depends on
the method using them so refer to them for details.
"""
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
name = request.function.__name__
self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
self.plugins = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this tries to
clean this up. It does not remove the temporary directory however so
it can be looked at after the test run has finished.
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
def __take_sys_modules_snapshot(self):
# some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
# `zope.interface` for example
def preserve_module(name):
return name.startswith("zope")
return SysModulesSnapshot(preserve=preserve_module)
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
assert not hasattr(pluginmanager, "reprec")
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
self.tmpdir.chdir()
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
def to_text(s):
return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)
if args:
source = u"\n".join(to_text(x) for x in args)
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for basename, value in items:
p = self.tmpdir.join(basename).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
source = u"\n".join(to_text(line) for line in source.lines)
p.write(source.strip().encode(encoding), "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
r"""Create new file(s) in the testdir.
:param str ext: The extension the file(s) should use, including the dot, e.g. `.py`.
:param list[str] args: All args will be treated as strings and joined using newlines.
The result will be written as contents to the file. The name of the
file will be based on the test function requesting this fixture.
:param kwargs: Each keyword is the name of a file, while the value of it will
be written as contents of the file.
Examples:
.. code-block:: python
testdir.makefile(".txt", "line1", "line2")
testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile(".ini", tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)["pytest"]
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile(".py", args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile(".txt", args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically when this object dies at the end of each
test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
# a call to syspathinsert() usually means that the caller wants to
# import some dynamically created files, thus with python3 we
# invalidate its import caches
self._possibly_invalidate_import_caches()
def _possibly_invalidate_import_caches(self):
# invalidate caches if we can (py33 and above)
try:
import importlib
except ImportError:
pass
else:
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
gets recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
def copy_example(self, name=None):
from . import experiments
import warnings
warnings.warn(experiments.PYTESTER_COPY_EXAMPLE, stacklevel=2)
example_dir = self.request.config.getini("pytester_example_dir")
if example_dir is None:
raise ValueError("pytester_example_dir is unset, can't copy examples")
example_dir = self.request.config.rootdir.join(example_dir)
for extra_element in self.request.node.iter_markers("pytester_example_path"):
assert extra_element.args
example_dir = example_dir.join(*extra_element.args)
if name is None:
func_name = self.request.function.__name__
maybe_dir = example_dir / func_name
maybe_file = example_dir / (func_name + ".py")
if maybe_dir.isdir():
example_path = maybe_dir
elif maybe_file.isfile():
example_path = maybe_file
else:
raise LookupError(
"{} cant be found as module or package in {}".format(
func_name, example_dir.bestrelpath(self.request.confg.rootdir)
)
)
else:
example_path = example_dir.join(name)
if example_path.isdir() and not example_path.join("__init__.py").isfile():
example_path.copy(self.tmpdir)
return self.tmpdir
elif example_path.isfile():
result = self.tmpdir.join(example_path.basename)
example_path.copy(result)
return result
else:
raise LookupError("example is not found as a file or directory")
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the
configuration
:param arg: a :py:class:`py.path.local` instance of the file
"""
session = Session(config)
assert "::" not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
:param path: a :py:class:`py.path.local` instance of the file
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the
test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
:param source: the source code of the test module
:param cmdlineargs: any extra command line arguments to use
:return: :py:class:`HookRecorder` instance of the result
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself. This means it can return a
:py:class:`HookRecorder` instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
:param args: command line arguments to pass to :py:func:`pytest.main`
:param plugin: (keyword-only) extra plugin instances the
``pytest.main()`` instance should use
:return: a :py:class:`HookRecorder` instance
"""
finalizers = []
try:
# When running pytest inline any plugins active in the main test
# process are already imported. So this disables the warning which
# will trigger to say they can no longer be rewritten, which is
# fine as they have already been rewritten.
orig_warn = AssertionRewritingHook._warn_already_imported
def revert_warn_already_imported():
AssertionRewritingHook._warn_already_imported = orig_warn
finalizers.append(revert_warn_already_imported)
AssertionRewritingHook._warn_already_imported = lambda *a: None
# Any sys.module or sys.path changes done while running pytest
# inline should be reverted after the test run completes to avoid
# clashing with later inline tests run within the same pytest test,
# e.g. just because they use matching test module names.
finalizers.append(self.__take_sys_modules_snapshot().restore)
finalizers.append(SysPathsSnapshot().restore)
# Important note:
# - our tests should not leave any other references/registrations
# laying around other than possibly loaded test modules
# referenced from sys.modules, as nothing will clean those up
# automatically
rec = []
class Collect(object):
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec(object):
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
finally:
for finalizer in finalizers:
finalizer()
def runpytest_inprocess(self, *args, **kwargs):
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides.
"""
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec(object):
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec(object):
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
args = [str(x) for x in args]
for x in args:
if str(x).startswith("--basetemp"):
# print("basedtemp exists: %s" %(args,))
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
# print("added basetemp: %s" %(args,))
return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
a new :py:class:`_pytest.core.PluginManager` and call the
pytest_cmdline_parse hook to create a new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
:param source: the module source
:param funcname: the name of the test function for which to return a
test item
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "%r item not found in module:\n%s\nitems: %s" % (
funcname,
source,
items,
)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning all test items contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
:param source: the source code of the module to collect
:param configargs: any extra arguments to pass to
:py:meth:`parseconfigure`
:param withinit: whether to also write an ``__init__.py`` file to the
same directory to ensure it is a package
"""
if isinstance(source, Path):
path = self.tmpdir.join(str(source))
assert not withinit, "not supported for paths"
else:
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
return self.getnode(config, path)
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection node
matching the given name.
:param modcol: a module collection node; see :py:meth:`getmodulecol`
:param name: the name of the node to return
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working directory
is in the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(
filter(None, [str(os.getcwd()), env.get("PYTHONPATH", "")])
)
kw["env"] = env
popen = subprocess.Popen(
cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw
)
popen.stdin.close()
return popen
def run(self, *cmdargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and stderr.
Returns a :py:class:`RunResult`.
"""
return self._run(*cmdargs)
def _run(self, *cmdargs):
cmdargs = [str(x) for x in cmdargs]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", " ".join(cmdargs))
print(" in:", str(py.path.local()))
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(
cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")
)
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
return (sys.executable, "-mpytest")
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run pytest as a subprocess with given arguments.
        Any plugins added to the :py:attr:`plugins` list will be added using the
        ``-p`` command line option. Additionally ``--basetemp`` is used to put
any temporary files and directories in a numbered directory prefixed
with "runpytest-" so they do not conflict with the normal numbered
pytest location for temporary files and directories.
Returns a :py:class:`RunResult`.
"""
p = py.path.local.make_numbered_dir(
prefix="runpytest-", keep=None, rootdir=self.tmpdir
)
args = ("--basetemp=%s" % p,) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ("-p", plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the temporary
directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),
)
class LineComp(object):
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
"""Assert that lines2 are contained (linearly) in lines1.
Return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher(object):
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing newlines, i.e.
``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output using in any order.
Lines are checked using ``fnmatch.fnmatch``. The argument is a list of
lines which have to occur in the output, in any order.
"""
self._match_lines_random(lines2, fnmatch)
def re_match_lines_random(self, lines2):
"""Check lines exist in the output using ``re.match``, in any order.
The argument is a list of lines which have to occur in the output, in
any order.
"""
self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
def _match_lines_random(self, lines2, match_func):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the output, in
        any order. Each line can contain glob wildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or match_func(x, line):
self._log("matched: ", repr(line))
break
else:
self._log("line %r not found in output" % line)
raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1 :]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(" ".join((str(x) for x in args)))
@property
def _log_text(self):
return "\n".join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search captured text for matching lines using ``fnmatch.fnmatch``.
The argument is a list of lines which have to match and can use glob
wildcards. If they do not match a pytest.fail() is called. The
matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, fnmatch, "fnmatch")
def re_match_lines(self, lines2):
"""Search captured text for matching lines using ``re.match``.
The argument is a list of lines which have to match using ``re.match``.
If they do not match a pytest.fail() is called.
The matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match")
def _match_lines(self, lines2, match_func, match_nickname):
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
:param list[str] lines2: list of string patterns to match. The actual
format depends on ``match_func``
:param match_func: a callable ``match_func(line, pattern)`` where line
is the captured line from stdout/stderr and pattern is the matching
pattern
:param str match_nickname: the nickname for the match function that
will be logged to stdout when a match occurs
"""
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif match_func(nextline, line):
self._log("%s:" % match_nickname, repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: %r" % (line,))
pytest.fail(self._log_text)
|
py | b40a9ec5def02491099a9d250a17ce1c63f602f9 | #!/usr/bin/env python3
# Author: Sahil
import pickle
import pandas as pd
import numpy as np
import scipy.spatial.distance as dist
import random
import math
from random import sample, randint
from collections import defaultdict, Counter
from sklearn.metrics import accuracy_score
from sklearn.ensemble import AdaBoostClassifier
train_data_loc = '../data/train-data.txt'
test_data_loc = '../data/test-data.txt'
train_df = pd.read_csv(train_data_loc, header=None, delim_whitespace=True)
test_df = pd.read_csv(test_data_loc, header=None, delim_whitespace=True)
train_data = np.array(np.array(train_df)[:, 2:], dtype=int)
train_label = np.array(np.array(train_df)[:, 1].T, dtype=int)
train_label.resize((train_label.shape[0], 1))
test_data = np.array(np.array(test_df)[:, 2:], dtype=int)
test_label = np.array(np.array(test_df)[:, 1].T, dtype=int)
test_label.resize((test_label.shape[0], 1))
print(train_data.shape, test_data.shape)
print(train_label.shape, test_label.shape)
length = len(train_data)
i = 0
hypothesis_count = 200
result = []
predictions = [0, 90, 180, 270]
list_of_alphas = []
list_of_decision_stumps = []
# these are the only predictions possible
# for each we get one hypothesis: a0*h0 + a1*h1 + ...
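# For clarity, a sketch of the bookkeeping the loops below implement (standard AdaBoost,
# restated from the code rather than adding anything new):
#
#   alpha_i = log((1 - error_i) / error_i)     # weight of weak hypothesis h_i
#   H(x)    = sum_i alpha_i * h_i(x)           # one ensemble per orientation
#
# each weak hypothesis h_i(x) compares two randomly chosen pixel columns, and the weights
# of correctly classified rows are scaled by error_i / (1 - error_i) and renormalised
# before the next round.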
def normalise(arr):
s = sum(arr)
for i in range(len(arr)):
arr[i] = arr[i]/s
return arr
alpha = []
for predict in predictions:
decision_stumps = []
hyp = []
weights = [1.0 / length] * length
    for i in range(hypothesis_count):
        # iterate hypothesis_count times to build the series a0*h0 + a1*h1 + ...
error = 0
test = []
        # the example weights start out uniform (set above, before this loop)
        # we classify each image as the current orientation or not (0 or not, 90 or not, and so on)
labels = [0] * len(train_data)
# [first_index, second_index]= random.sample(range(0, 191), 2)
first_index = random.randint(0, 191)
second_index = random.randint(0, 191)
for m, row in enumerate(train_data):
first_number = row[first_index]
second_number = row[second_index]
test.append(first_number-second_number)
# hypothesis evaluation
if first_number - second_number > 0:
labels[m] = predict
else:
labels[m] = -1
        # error is the sum of the weights of the training examples we predicted wrong
for j in range(length):
if labels[j] != train_label[j][0]:
error = error + weights[j]
# updating the weight for each training set
for k in range(length):
if labels[k] == train_label[k][0]:
weights[k] = weights[k] * (error / (1 - error))
# weights[k] = weights[k] * ((1-error) /error)
normalise(weights)
hyp.append((math.log((1 - error) / error)))
# hyp.append((math.log((error)/ (1-error))))
decision_stumps.append([first_index, second_index])
    # keep the alpha part of the hypothesis; we will have hypothesis_count such values per orientation
alpha.append(hyp)
list_of_decision_stumps.append(decision_stumps)
print("adding hypothesis")
# hypothesis will have four hypothesis
# list_of_alphas.append(alpha)
# testing
correct = 0
for z in range(len(test_data)):
row = test_data[z]
    max_value = float("-inf")
    tag = predictions[0]
    for i in range(len(alpha)):
        # score for this orientation: weighted vote of its decision stumps
        s = 0
        for j in range(len(alpha[i])):
            s = s + alpha[i][j]*(row[list_of_decision_stumps[i]
                                     [j][0]] - row[list_of_decision_stumps[i][j][1]])
        if s > max_value:
            max_value = s
            tag = predictions[i]
if tag == test_label[z]:
correct += 1
# for index, row in enumerate(test_data):
print(correct)
print(len(test_data))
print(correct/len(test_data))
#
# maybe the maximum achievable accuracy, for comparison
#clf = AdaBoostClassifier()
#clf.fit(train_data, train_label.ravel())
#pred_data = clf.predict(test_data)
#score = accuracy_score(test_label.ravel(), pred_data)
#print('The accuracy for adaboost classification is {}'.format(score))
|
py | b40a9fd2db64a28311e40d3421cff38c574474f3 | import json
from app import db
from app.models import User, Ingredient, Category, Recipe, Mealtype, RecipeIngredients, Rating, RecipeInstructions
from sqlalchemy import func
import random
import datetime
########################################### SETUP INGREDIENTS AND CATEGORIES #############################################
def seed_db():
db.drop_all()
db.create_all()
random.seed(333)
# Load json
input_file=open('data_seed/categories.json', 'r', encoding='utf8')
json_decode=json.load(input_file)
for item in json_decode['categories']:
category_object = Category(name=item)
db.session.add(category_object)
# Load json
input_file=open('data_seed/ingredients.json', 'r', encoding='utf8')
json_decode=json.load(input_file)
for item in json_decode['ingredients']:
category = Category.query.filter_by(name=item['strType']).first()
ingredient = Ingredient(name=item['strIngredient'])
ingredient.categories.append(category)
db.session.add(ingredient)
########################################################################################################################
########################################### SETUP MEALTYPES ############################################################
# Load json
input_file=open('data_seed/mealtypes.json', 'r', encoding='utf8')
json_decode=json.load(input_file)
for item in json_decode['categories']:
if (item['strCategory'] != None):
mealtype = Mealtype(name=item['strCategory'])
db.session.add(mealtype)
########################################################################################################################
########################################### SETUP RECIPES ##############################################################
user = User(username='admin', email='[email protected]')
user.hash_password('admin')
db.session.add(user)
########################################################################################################################
# Load json
input_file=open('data_seed/recipes.json', 'r', encoding='utf8')
json_decode=json.load(input_file)
MAX_INGREDIENTS = 20
for item in json_decode['meals']:
# Make new recipe
recipe = Recipe(name=item['strMeal'], image=item['strMealThumb'])
db.session.add(recipe)
for instruction in item['strInstructions'].splitlines():
if len(instruction) > 2:
recipe_instruction = RecipeInstructions(instruction=instruction)
recipe.instructions.append(recipe_instruction)
mealtype = Mealtype.query.filter_by(name=item['strCategory']).first()
recipe.mealtypes.append(mealtype)
for i in range(1, MAX_INGREDIENTS + 1):
ingredient_name = item['strIngredient' + str(i)]
measure = item['strMeasure' + str(i)]
if ingredient_name and measure:
ingredient = Ingredient.query.filter(func.lower(Ingredient.name) == func.lower(ingredient_name)).first()
if ingredient:
recipe_ingredient = RecipeIngredients(quantity=measure)
recipe_ingredient.ingredients = ingredient
recipe.ingredients.append(recipe_ingredient)
else:
break
user.recipes.append(recipe)
# Load json
input_file=open('data_seed/recipes2.json', 'r', encoding='utf8')
json_decode=json.load(input_file)
for item in json_decode['meals']:
# Make new recipe
if 'image' in item:
recipe = Recipe(name=item['name'], image=item['image'])
else:
recipe = Recipe(name=item['name'])
db.session.add(recipe)
for instruction in item['instruction'].splitlines():
recipe_instruction = RecipeInstructions(instruction=instruction)
recipe.instructions.append(recipe_instruction)
mealtype = Mealtype.query.filter_by(name=item['mealtype']).first()
recipe.mealtypes.append(mealtype)
ingredients = item['ingredients']
for ingredient in ingredients:
db_ingredient = Ingredient.query.filter_by(name=ingredient['name']).first()
if db_ingredient:
recipe_ingredient = RecipeIngredients(quantity=ingredient['quantity'])
recipe_ingredient.ingredients = db_ingredient
recipe.ingredients.append(recipe_ingredient)
user.recipes.append(recipe)
db.session.commit()
########################################################################################################################
########################################### ADD RANDOM RATINGS #########################################################
newUser = User(username='Emmanuel', email='[email protected]')
newUser.hash_password('test')
db.session.add(newUser)
newUser = User(username='kenny', email='[email protected]')
newUser.hash_password('test')
db.session.add(newUser)
recipes = Recipe.query.all()
for recipe in recipes:
if random.randrange(10) > 3:
rating = Rating(rating=(random.randrange(5) + 1), comment='Demo Comment.')
recipe.rating.append(rating)
newUser.rating.append(rating)
db.session.commit()
print('Database has been seeded')
|
py | b40aa035a44a45d15ff02420fe7ab6f5812509e4 | from datetime import date
import scrapy
class CovidScraper(scrapy.Spider):
name = "Bundesregierung_scraper"
start_urls = ["https://www.bundesregierung.de/breg-de/themen/coronavirus/ausbreitung-coronavirus-1716188"]
def parse(self, response):
columns = {
"question" : [],
"answer" : [],
"answer_html" : [],
"link" : [],
"name" : [],
"source" : [],
"category" : [],
"country" : [],
"region" : [],
"city" : [],
"lang" : [],
"last_update" : [],
}
QUESTION_ELEMENT_SELECTOR = "h2.mt-3"
QUESTION_SELECTOR = "::text"
questions = response.css(QUESTION_ELEMENT_SELECTOR)
for question_elm in questions:
question = question_elm.css(QUESTION_SELECTOR).getall()
question = " ".join(question).strip()
            # all paragraphs till the next question header are considered to be the answer
following_siblings = question_elm.xpath('following-sibling::*')
answer = []
answer_html = []
for elm in following_siblings:
if elm.root.tag == 'p' and 'navToTop' not in elm.root.classes:
answer += elm.css("::text").getall()
answer_html += [elm.get()]
else:
break
answer = "".join(answer).replace('\n', '').strip()
answer_html = " ".join(answer_html).strip()
# add question-answer pair to data dictionary
columns["question"].append(question)
columns["answer"].append(answer)
columns["answer_html"].append(answer_html)
today = date.today()
columns["link"] = ["https://www.bundesregierung.de/breg-de/themen/coronavirus/ausbreitung-coronavirus-1716188"] * len(columns["question"])
columns["name"] = ["Wichtige Fragen und Antworten zum Coronavirus"] * len(columns["question"])
columns["source"] = ["Presse- und Informationsamt der Bundesregierung"] * len(columns["question"])
columns["category"] = [""] * len(columns["question"])
columns["country"] = ["DE"] * len(columns["question"])
columns["region"] = [""] * len(columns["question"])
columns["city"] = [""] * len(columns["question"])
columns["lang"] = ["de"] * len(columns["question"])
columns["last_update"] = [today.strftime("%Y/%m/%d")] * len(columns["question"])
return columns
|
py | b40aa31d2c15da6cc61312edc82cf1e18a10445f | import os
import argparse
import random
import numpy as np
import shutil
from shutil import copyfile
from misc import printProgressBar
from random import sample
def rm_mkdir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
print('Remove path - %s' % dir_path)
os.makedirs(dir_path)
print('Create path - %s' % dir_path)
if os.path.exists(str(dir_path) + '_balanced'):
shutil.rmtree(str(dir_path) + '_balanced')
print('Remove path - %s'% str(dir_path) + '_balanced')
os.makedirs(str(dir_path) + '_balanced')
print('Create path - %s' % str(dir_path) + '_balanced')
if os.path.exists(str(dir_path) + '_pos'):
shutil.rmtree(str(dir_path) + '_pos')
print('Remove path - %s'% str(dir_path) + '_pos')
os.makedirs(str(dir_path) + '_pos')
print('Create path - %s' % str(dir_path) + '_pos')
def main(config):
rm_mkdir(config.train_path)
rm_mkdir(config.train_GT_path)
rm_mkdir(config.valid_path)
rm_mkdir(config.valid_GT_path)
with open(config.train_list) as f:
lines_train = f.read().splitlines()
with open(config.valid_list) as f:
lines_valid = f.read().splitlines()
num_train = len(lines_train)
num_valid = len(lines_valid)
print('\nNum of train set : ', num_train)
print('\nNum of test set : ', num_valid)
for filename in lines_train:
# original image path
img_train_src = os.path.join(config.origin_data_path, filename)
# original GT path
gt_train_src = os.path.join(config.origin_GT_path, filename)
# GT copyto path and rename positive and negative cases
gt_train_dst_pos = os.path.join(config.train_GT_path, 'pos_' + os.path.basename(filename))
gt_train_dst_neg = os.path.join(config.train_GT_path, 'neg_' + os.path.basename(filename))
gt_train_dst_pos_balanced = os.path.join(config.train_GT_path + '_balanced',
'pos_' + os.path.basename(filename))
if os.path.exists(gt_train_src):
copyfile(gt_train_src, gt_train_dst_pos)
copyfile(img_train_src, os.path.join(config.train_path, 'pos_' + os.path.basename(filename)))
copyfile(gt_train_src, gt_train_dst_pos_balanced)
copyfile(img_train_src, os.path.join(config.train_path+'_pos', 'pos_' + os.path.basename(filename)))
copyfile(gt_train_src, os.path.join(config.train_GT_path + '_pos', 'pos_' + os.path.basename(filename)))
else:
img = np.load(img_train_src)
gt = np.zeros(img.shape)
np.save(gt_train_dst_neg, gt)
copyfile(img_train_src, os.path.join(config.train_path, 'neg_' + os.path.basename(filename)))
for filename in lines_valid:
img_valid_src = os.path.join(config.origin_data_path, filename)
gt_valid_src = os.path.join(config.origin_GT_path, filename)
gt_valid_dst_pos = os.path.join(config.valid_GT_path, 'pos_' + os.path.basename(filename))
gt_valid_dst_neg = os.path.join(config.valid_GT_path, 'neg_' + os.path.basename(filename))
gt_valid_dst_pos_balanced = os.path.join(config.valid_GT_path + '_balanced',
'pos_' + os.path.basename(filename))
if os.path.exists(gt_valid_src):
copyfile(gt_valid_src, gt_valid_dst_pos)
copyfile(img_valid_src, os.path.join(config.valid_path, 'pos_' + os.path.basename(filename)))
copyfile(gt_valid_src, gt_valid_dst_pos_balanced)
copyfile(img_valid_src, os.path.join(config.valid_path + '_pos', 'pos_' + os.path.basename(filename)))
copyfile(gt_valid_src, os.path.join(config.valid_GT_path+'_pos', 'pos_' + os.path.basename(filename)))
else:
img = np.load(img_valid_src)
gt = np.zeros(img.shape)
np.save(gt_valid_dst_neg, gt)
copyfile(img_valid_src, os.path.join(config.valid_path, 'neg_' + os.path.basename(filename)))
# make balanced folder for training data
_, _, train_GT_pos_balanced = next(os.walk(config.train_GT_path + '_balanced'))
pos_size = len(train_GT_pos_balanced)
print("train pos size: ")
print(pos_size)
pos_files = [filename for filename in os.listdir(config.train_GT_path) if filename.startswith('pos_')]
neg_files = [filename for filename in os.listdir(config.train_GT_path) if filename.startswith('neg_')]
for file in pos_files:
copyfile(os.path.join(config.train_path, file), os.path.join(config.train_path+'_balanced', file))
sampled_neg_file = sample(neg_files, pos_size)
for file in sampled_neg_file:
copyfile(os.path.join(config.train_GT_path, file), os.path.join(config.train_GT_path+'_balanced', file))
copyfile(os.path.join(config.train_path, file), os.path.join(config.train_path+'_balanced', file))
# make balanced folder for testing data
_, _, valid_GT_pos_balanced = next(os.walk(config.valid_GT_path + '_balanced'))
pos_size = len(valid_GT_pos_balanced)
print("valid pos size:")
print(pos_size)
pos_files = [filename for filename in os.listdir(config.valid_GT_path) if filename.startswith('pos_')]
neg_files = [filename for filename in os.listdir(config.valid_GT_path) if filename.startswith('neg_')]
for file in pos_files:
copyfile(os.path.join(config.valid_path, file), os.path.join(config.valid_path + '_balanced', file))
sampled_neg_file = sample(neg_files, pos_size)
for file in sampled_neg_file:
copyfile(os.path.join(config.valid_GT_path, file), os.path.join(config.valid_GT_path + '_balanced', file))
copyfile(os.path.join(config.valid_path, file), os.path.join(config.valid_path + '_balanced', file))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# input
parser.add_argument('--train_list', type=str, default='/home/sanford2021/Desktop/lymph/data/txt/train_0.txt')
parser.add_argument('--valid_list', type=str, default='/home/sanford2021/Desktop/lymph/data/txt/valid_0.txt')
parser.add_argument('--train_fold', type=str, default='train_0')
parser.add_argument('--valid_fold', type=str, default='valid_0')
# data path
parser.add_argument('--origin_data_path', type=str, default='/home/sanford2021/Desktop/lymph/data/npy/img/')
parser.add_argument('--origin_GT_path', type=str, default='/home/sanford2021/Desktop/lymph/data/npy/detect/')
# prepared data path
parser.add_argument('--train_path', type=str, default='./dataset/train')
parser.add_argument('--train_GT_path', type=str, default='./dataset/train_GT')
parser.add_argument('--valid_path', type=str, default='./dataset/valid')
parser.add_argument('--valid_GT_path', type=str, default='./dataset/valid_GT')
config = parser.parse_args()
print(config)
main(config)
|
py | b40aa32157d005f8aacfc0bcda7b171f356a34eb | #!/usr/bin/env python3
import argparse
from collections import OrderedDict
import re
import os
import sys
import traceback
###############################################################
# validate_reflection
#
#
# Looks for files with FC_REFLECT macros. Requires fields to match class definition (provided in same file),
# unless the FC_REFLECT is proceeded by "// @ignore <field1>, <field2>, ..., <fieldN>" to indicate that field1,
# field2, ... fieldN are not reflected and/or "// @swap <field1>, <field2>, ..., <fieldN>" to indicate that
# field1, field2, ... fieldN are not in the same order as the class definition.
#
# NOTE: If swapping fields the script expects you to only indicate fields that are not in the expected order,
# so once it runs into the swapped field, it will remove that field from the order and expect the remaining in
# that order, so if the class has field1, field2, field3, and field4, and the reflect macro has the order
# field1, field3, field2, then field4, it should indicate swapping field2. This will remove field2 from the
# expected order and the rest will now match. Alternatively it should indicate swapping field3, since the remaining
# fields will also match the order. But both field2 and field3 should not be indicated.
#
#
#
###############################################################
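# Illustrative (hypothetical) example of the convention described above. Given a C++
# definition such as:
#
#     struct thing {
#        uint64_t id;
#        name     owner;
#        asset    amount;
#     };
#
#     // @ignore id
#     // @swap amount
#     FC_REFLECT( thing, (amount)(owner) )
#
# the macro passes validation: "id" is declared ignored, and "amount" is declared to be
# out of the declaration order relative to the struct.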
import atexit
import tempfile
@atexit.register
def close_debug_file():
if debug_file != None:
debug_file.close()
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-?', action='help', default=argparse.SUPPRESS,
help=argparse._('show this help message and exit'))
parser.add_argument('-d', '--debug', help="generate debug output into a temporary directory", action='store_true')
parser.add_argument('-r', '--recurse', help="recurse through an entire directory (if directory provided for \"file\"", action='store_true')
parser.add_argument('-x', '--extension', type=str, help="extensions array to allow for directory and recursive search. Defaults to \".hpp\" and \".cpp\".", action='append')
parser.add_argument('-e', '--exit-on-error', help="Exit immediately when a validation error is discovered. Default is to run validation on all files and directories provided.", action='store_true')
parser.add_argument('files', metavar='file', nargs='+', type=str, help="File or directory to scan for FC_REFLECT macros.")
args = parser.parse_args()
recurse = args.recurse
if args.debug:
temp_dir = tempfile.mkdtemp()
print("temporary files writen to %s" % (temp_dir))
debug_file = open(os.path.join(temp_dir, "validate_reflect.debug"), "w")
else:
debug_file = None
extensions = []
if args.extension is None or len(args.extension) == 0:
extensions = [".hpp",".cpp"]
else:
for extension in args.extension:
assert len(extension) > 0, "empty --extension passed in"
if extension[0] != ".":
extension = "." + extension
extensions.append(extension)
print("extensions=%s" % (",".join(extensions)))
ignore_str = "@ignore"
swap_str = "@swap"
fc_reflect_str = "FC_REFLECT"
fc_reflect_possible_enum_ext = "(?:_ENUM)?"
fc_reflect_derived_ext = "(?:_DERIVED)"
def debug(debug_str):
if debug_file is not None:
debug_file.write(debug_str + "\n")
class EmptyScope:
single_comment_pattern = re.compile(r'//.*\n+')
single_comment_ignore_swap_pattern = re.compile(r'//\s*(?:%s|%s)\s' % (ignore_str, swap_str))
multi_line_comment_pattern = re.compile(r'/\*(.*?)\*/', re.MULTILINE | re.DOTALL)
ignore_swap_pattern = re.compile(r'^\s*(%s|%s)\s+(.*)$' % (ignore_str, swap_str), re.DOTALL)
strip_extra_pattern = re.compile(r'\n\s*\*\s*')
invalid_chars_pattern = re.compile(r'([^\w\s,])')
multi_line_comment_ignore_swap_pattern = re.compile(r'(\w+)(?:\s*,\s*)?')
namespace_str = "namespace"
struct_str = "struct"
class_str = "class"
enum_str = "enum"
any_scope_pattern = re.compile(r'\{', re.DOTALL)
start_char = "{"
end_char = "}"
def __init__(self, name, start, content, parent_scope):
pname = parent_scope.name if parent_scope is not None else ""
debug("EmptyScope.__init__ %s %d - Parent %s" % (name, start, pname))
self.name = name
self.content = content
self.start = start
self.current = start + 1
self.parent_scope = parent_scope
self.end = len(content) - 1 if start == 0 else None
self.children = {}
self.children_ordered = []
self.fields = []
self.usings = {}
self.inherit = None
def read(self):
debug("EmptyScope(%s).read - %s" % (self.__class__.__name__, self.name))
end = len(self.content) - 1
while self.current < end:
next_scope = self.next_scope()
if next_scope is None:
break
self.add(next_scope)
if self.end is None:
self.end = self.content.find(EmptyScope.end_char, self.current, len(self.content))
pdesc = str(self.parent_scope) if self.parent_scope is not None else "<no parent scope>"
assert self.end != -1, "Could not find \"%s\" in \"%s\" - parent scope - %s" % (EmptyScope.end_char, self.content[self.current:], pdesc)
debug("EmptyScope(%s).read - %s - Done at %s" % (self.__class__.__name__, self.name, self.end))
def add(self, child):
debug("EmptyScope.add %s (%s) to %s (%s) - DROP" % (child.name, child.__class__.__name__, self.name, self.__class__.__name__))
pass
def find_scope_start(self, content, start, end, find_str):
debug("EmptyScope.find_scope_start")
loc = content.find(find_str, start, end)
if loc == -1:
return loc
else:
return loc + len(find_str) - len(EmptyScope.start_char)
def find_possible_end(self):
possible = self.content.find(EmptyScope.end_char, self.current)
debug("EmptyScope.find_possible_end current=%s possible end=%s" % (self.current, possible))
return possible
def next_scope(self, end = None):
if end is None:
end = self.find_possible_end()
debug("EmptyScope.next_scope current=%s end=%s" % (self.current, end))
match = EmptyScope.any_scope_pattern.search(self.content[self.current:end])
if match:
start = self.find_scope_start(self.content, self.current, end, EmptyScope.start_char)
new_scope = EmptyScope(None, start, self.content, self)
new_scope.read()
self.current = new_scope.end + 1
return new_scope
return None
def find_class(self, scoped_name):
scope_separator = "::"
loc = scoped_name.find(scope_separator)
if loc != -1:
child_name = scoped_name[0:loc]
loc += len(scope_separator)
child_scoped_name = scoped_name[loc:]
if child_name in self.children:
debug("find_class traverse child_name: %s, child_scoped_name: %s" % (child_name, child_scoped_name))
return self.children[child_name].find_class(child_scoped_name)
elif self.inherit is not None and scoped_name in self.inherit.children:
debug("find_class found scoped_name: %s in inherited: %s" % (scoped_name, self.inherit.name))
return self.inherit.children[scoped_name].find_class(child_scoped_name)
else:
if scoped_name not in self.children:
inherit_children = ",".join(self.inherit.children) if self.inherit is not None else "no inheritance"
inherit_using = ",".join(self.inherit.usings) if self.inherit is not None else "no inheritance"
inherit = self.inherit.name if self.inherit is not None else None
debug("find_class %s not in children, using: %s, inherit: %s - children: %s, using: %s" % (scoped_name, ",".join(self.usings), inherit, inherit_children, inherit_using))
if scoped_name in self.children:
debug("find_class found scoped_name: %s" % (scoped_name))
return self.children[scoped_name]
elif scoped_name in self.usings:
using = self.usings[scoped_name]
debug("find_class found scoped_name: %s, using: %s" % (scoped_name, using))
return self.find_class(using)
elif self.inherit is not None and scoped_name in self.inherit.children:
debug("find_class found scoped_name: %s in inherited: %s" % (scoped_name, self.inherit.name))
return self.inherit.children[scoped_name]
else:
debug("find_class could not find scoped_name: %s, children: %s" % (scoped_name, ",".join(self.children)))
return None
def __str__(self):
indent = ""
next = self.parent_scope
while next is not None:
indent += " "
next = next.parent_scope
desc = "%s%s scope type=\"%s\"\n%s children={\n" % (indent, self.name, self.__class__.__name__, indent)
for child in self.children_ordered:
desc += str(self.children[child]) + "\n"
desc += indent + " }\n"
desc += indent + " fields={\n"
for field in self.fields:
desc += indent + " " + field + "\n"
desc += indent + " }\n"
desc += indent + " usings={\n"
for using in self.usings:
desc += indent + " " + using + ": " + self.usings[using] + "\n"
desc += indent + " }\n"
return desc
def create_scope(type, name, inherit, start, content, parent_scope):
debug("create_scope")
if type == EmptyScope.namespace_str:
return Namespace(name, inherit, start, content, parent_scope)
elif type == EmptyScope.class_str or type == EmptyScope.struct_str:
return ClassStruct(name, inherit, start, content, parent_scope, is_enum = False)
elif type == EmptyScope.enum_str:
return ClassStruct(name, inherit, start, content, parent_scope, is_enum = True)
else:
assert False, "Script does not account for type = \"%s\" found in \"%s\"" % (type, content[start:])
class ClassStruct(EmptyScope):
field_pattern = re.compile(r'\n\s*?(?:mutable\s+)?(\w[\w:\d<>]*)\s+(\w+)\s*(?:=\s[^;]+;|;|=\s*{)', re.MULTILINE | re.DOTALL)
enum_field_pattern = re.compile(r'\n\s*?(\w+)\s*(?:=\s*[^,}\s]+)?\s*(?:,|})', re.MULTILINE | re.DOTALL)
class_pattern = re.compile(r'(%s|%s|%s)\s+(\w+)\s*(:\s*public\s+([^<\s]+)[^{]*)?\s*\{' % (EmptyScope.struct_str, EmptyScope.class_str, EmptyScope.enum_str), re.MULTILINE | re.DOTALL)
cb_obj_pattern = re.compile(r'chainbase::object$')
obj_pattern = re.compile(r'^object$')
using_pattern = re.compile(r'\n\s*?using\s+(\w+)\s*=\s*([\w:]+)(?:<.*>)?;')
def __init__(self, name, inherit, start, content, parent_scope, is_enum):
debug("ClassStruct.__init__ %s %d" % (name, start))
EmptyScope.__init__(self, name, start, content, parent_scope)
self.classes = {}
self.pattern = ClassStruct.class_pattern
self.is_enum = is_enum
self.inherit = None
if inherit is None:
self.ignore_id = False
else:
match = ClassStruct.cb_obj_pattern.search(inherit)
if match is None:
match = ClassStruct.obj_pattern.search(inherit)
self.ignore_id = True if match else False
next = self.parent_scope
while self.inherit is None and next is not None:
self.inherit = next.find_class(inherit)
next = next.parent_scope
debug("Checking for object, ignore_id: %s, inherit: %s, name: %s" % (self.ignore_id, inherit, name))
def add(self, child):
debug("ClassStruct.add %s (%s) to %s (%s)" % (child.name, child.__class__.__name__, self.name, self.__class__.__name__))
if isinstance(child, ClassStruct):
self.classes[child.name] = child
self.children[child.name] = child
self.children_ordered.append(child.name)
def add_fields(self, start, end):
loc = start
while loc < end:
debug("ClassStruct.add_fields -{\n%s\n}" % (self.content[loc:end + 1]))
if self.is_enum:
loc = self.add_enum_field(loc, end)
else:
loc = self.add_field(loc, end)
debug("ClassStruct.add_fields done")
def add_field(self, loc, end):
match = ClassStruct.field_pattern.search(self.content[loc:end + 1])
if match is None:
return end
field = match.group(2)
self.fields.append(field)
all = match.group(0)
loc = self.content.find(all, loc) + len(all)
debug("ClassStruct.add_field - %s (%d) - %s" % (field, len(self.fields), ClassStruct.field_pattern.pattern))
return loc
def add_enum_field(self, loc, end):
match = ClassStruct.enum_field_pattern.search(self.content[loc:end + 1])
if match is None:
return end
field = match.group(1)
self.fields.append(field)
all = match.group(0)
loc = self.content.find(all, loc) + len(all)
debug("ClassStruct.add_enum_field - %s (%d) - %s" % (field, len(self.fields), ClassStruct.enum_field_pattern.pattern))
return loc
def add_usings(self, start, end):
loc = start
while loc < end:
debug("ClassStruct.add_usings -{\n%s\n}" % (self.content[loc:end + 1]))
match = ClassStruct.using_pattern.search(self.content[loc:end])
if match is None:
break
using = match.group(1)
class_struct = match.group(2)
self.usings[using] = class_struct
all = match.group(0)
loc = self.content.find(all, loc) + len(all)
debug("ClassStruct.add_usings - %s (%d)" % (using, len(self.usings)))
debug("ClassStruct.add_usings done")
def next_scope(self, end = None):
new_scope = None
if end is None:
end = self.find_possible_end()
debug("ClassStruct.next_scope end=%s on %s\n\npossible scope={\n\"%s\"\n\n\npattern=%s" % (end, self.name, self.content[self.current:end], self.pattern.pattern))
match = self.pattern.search(self.content[self.current:end])
start = -1
search_str = None
type = None
name = None
inherit = None
if match:
debug("ClassStruct.next_scope match on %s" % (self.name))
search_str = match.group(0)
type = match.group(1)
name = match.group(2)
if len(match.groups()) >= 3:
inherit = match.group(4)
start = self.find_scope_start(self.content, self.current, end, search_str)
debug("all: %s, type: %s, name: %s, start: %s, inherit: %s" % (search_str, type, name, start, inherit))
generic_scope_start = self.find_scope_start(self.content, self.current, end, EmptyScope.start_char)
if start == -1 and generic_scope_start == -1:
debug("ClassStruct.next_scope end=%s no scopes add_fields and exit" % (end))
self.add_fields(self.current, end)
return None
debug("found \"%s\" - \"%s\" - \"%s\" current=%s, start=%s, end=%s, pattern=%s " % (search_str, type, name, self.current, start, end, self.pattern.pattern))
# determine if there is a non-namespace/non-class/non-struct scope before a namespace/class/struct scope
if start != -1 and (generic_scope_start == -1 or start <= generic_scope_start):
debug("found %s at %d" % (type, start))
new_scope = create_scope(type, name, inherit, start, self.content, self)
else:
debug("found EmptyScope (%s) at %d, next scope at %s" % (type, generic_scope_start, start))
new_scope = EmptyScope("", generic_scope_start, self.content, self)
self.add_fields(self.current, new_scope.start)
self.add_usings(self.current, new_scope.start)
new_scope.read()
self.current = new_scope.end + 1
return new_scope
class Namespace(ClassStruct):
namespace_class_pattern = re.compile(r'(%s|%s|%s|%s)\s+(\w+)\s*(:\s*public\s+([^<\s]+)[^{]*)?\s*\{' % (EmptyScope.namespace_str, EmptyScope.struct_str, EmptyScope.class_str, EmptyScope.enum_str), re.MULTILINE | re.DOTALL)
def __init__(self, name, inherit, start, content, parent_scope):
debug("Namespace.__init__ %s %d" % (name, start))
assert inherit is None, "namespace %s should not inherit from %s" % (name, inherit)
ClassStruct.__init__(self, name, None, start, content, parent_scope, is_enum = False)
self.namespaces = {}
self.pattern = Namespace.namespace_class_pattern
def add(self, child):
debug("Namespace.add %s (%s) to %s (%s)" % (child.name, child.__class__.__name__, self.name, self.__class__.__name__))
if isinstance(child, ClassStruct):
ClassStruct.add(self, child)
return
if isinstance(child, Namespace):
self.namespaces[child.name] = child
self.children[child.name] = child
self.children_ordered.append(child.name)
class Reflection:
def __init__(self, name):
self.name = name
self.fields = []
self.ignored = []
self.swapped = []
self.absent = []
class Reflections:
def __init__(self, content):
self.content = content
self.current = 0
self.end = len(content)
self.classes = OrderedDict()
self.with_2_comments = re.compile(r'(//\s*(%s|%s)\s+([^/]*?)\s*\n\s*//\s*(%s|%s)\s+([^/]*?)\s*\n\s*(%s%s\s*\(\s*(\w[^\s<]*))(?:<[^>]*>)?\s*,)' % (ignore_str, swap_str, ignore_str, swap_str, fc_reflect_str, fc_reflect_possible_enum_ext), re.MULTILINE | re.DOTALL)
self.with_comment = re.compile(r'(//\s*(%s|%s)\s+([^/]*?)\s*\n\s*(%s%s\s*\(\s*(\w[^\s<]*))(?:<[^>]*>)?\s*,)' % (ignore_str, swap_str, fc_reflect_str, fc_reflect_possible_enum_ext), re.MULTILINE | re.DOTALL)
self.reflect_pattern = re.compile(r'(\b(%s%s\s*\(\s*(\w[^\s<]*)(?:<[^>]*>)?\s*,)\s*(\(.*?\))\s*\))[^\)]*%s%s\b' % (fc_reflect_str, fc_reflect_possible_enum_ext, fc_reflect_str, fc_reflect_possible_enum_ext), re.MULTILINE | re.DOTALL)
self.field_pattern = re.compile(r'\(([^\)]+)\)', re.MULTILINE | re.DOTALL)
self.ignore_swap_pattern = re.compile(r'\b([\w\d]+)\b', re.MULTILINE | re.DOTALL)
def read(self):
while self.current < self.end:
match_2_comments = self.with_2_comments.search(self.content[self.current:])
match_comment = self.with_comment.search(self.content[self.current:])
match_reflect = self.reflect_pattern.search(self.content[self.current:])
match_loc = None
if match_2_comments or match_comment:
loc1 = self.content.find(match_2_comments.group(1), self.current) if match_2_comments else self.end
loc2 = self.content.find(match_comment.group(1), self.current) if match_comment else self.end
debug("loc1=%s and loc2=%s" % (loc1, loc2))
group1 = match_2_comments.group(1) if match_2_comments else "<EMPTY>"
group2 = match_comment.group(1) if match_comment else "<EMPTY>"
debug("\n ***** group1={\n%s\n}\n\n\n ***** group2={\n%s\n}\n\n\n" % (group1, group2))
if loc2 < loc1:
debug("loc2 earlier")
match_2_comments = None
match_loc = loc2
else:
match_loc = loc1
if match_reflect and match_loc is not None:
debug("match_reflect and one of the other matches")
loc1 = self.content.find(match_reflect.group(1), self.current)
if loc1 < match_loc:
debug("choose the other matches")
match_comment = None
match_2_comments = None
else:
debug("choose comment")
pass
if match_2_comments:
debug("match_2_comments")
debug("Groups {")
for g in match_2_comments.groups():
debug(" %s" % g)
debug("}")
assert len(match_2_comments.groups()) == 7, "match_2_comments wrong size due to regex pattern change"
ignore_or_swap1 = match_2_comments[2]
next_reflect_ignore_swap1 = match_2_comments[3]
ignore_or_swap2 = match_2_comments[4]
next_reflect_ignore_swap2 = match_2_comments[5]
search_string_for_next_reflect_class = match_2_comments[6]
next_reflect_class = match_2_comments[7]
self.add_ignore_swaps(next_reflect_class, next_reflect_ignore_swap1, ignore_or_swap1)
self.add_ignore_swaps(next_reflect_class, next_reflect_ignore_swap2, ignore_or_swap2)
elif match_comment:
debug("match_comment")
debug("Groups {")
for g in match_comment.groups():
debug(" %s" % g)
debug("}")
assert len(match_comment.groups()) == 5, "match_comment too short due to regex pattern change"
# not using array indices here because for some reason the type of match_2_comments and match_comment are different
ignore_or_swap = match_comment.group(2)
next_reflect_ignore_swap = match_comment.group(3)
search_string_for_next_reflect_class = match_comment.group(4)
next_reflect_class = match_comment.group(5)
self.add_ignore_swaps(next_reflect_class, next_reflect_ignore_swap, ignore_or_swap)
if match_reflect:
debug("match_reflect")
debug("Groups {")
for g in match_reflect.groups():
debug(" %s" % g)
debug("}")
assert len(match_reflect.groups()) == 4, "match_reflect too short due to regex pattern change"
next_reflect = match_reflect.group(2)
next_reflect_class = match_reflect.group(3)
next_reflect_fields = match_reflect.group(4)
self.add_fields(next_reflect, next_reflect_class, next_reflect_fields)
else:
debug("search for next reflect done")
self.current = self.end
break
def find_or_add(self, reflect_class):
if reflect_class not in self.classes:
debug("find_or_add added \"%s\"" % (reflect_class))
self.classes[reflect_class] = Reflection(reflect_class)
return self.classes[reflect_class]
def add_fields(self, next_reflect, next_reflect_class, next_reflect_fields):
old = self.current
self.current = self.content.find(next_reflect, self.current) + len(next_reflect)
debug("all={\n\n%s\n\nclass=\n\n%s\n\nfields=\n\n%s\n\n" % (next_reflect, next_reflect_class, next_reflect_fields))
fields = re.findall(self.field_pattern, next_reflect_fields)
for field in fields:
self.add_field(next_reflect_class, field)
reflect_class = self.find_or_add(next_reflect_class)
debug("add_fields %s done, fields count=%s, ignored count=%s, swapped count=%s" % (next_reflect_class, len(reflect_class.fields), len(reflect_class.ignored), len(reflect_class.swapped)))
def add_ignore_swaps(self, next_reflect_class, next_reflect_ignores_swaps, ignore_or_swap):
debug("class=\n\n%s\n\n%s=\n\n%s\n\n" % (next_reflect_class, ignore_or_swap, next_reflect_ignores_swaps))
end = len(next_reflect_ignores_swaps)
current = 0
while current < end:
ignore_swap_match = self.ignore_swap_pattern.search(next_reflect_ignores_swaps[current:])
if ignore_swap_match:
ignore_swap = ignore_swap_match.group(1)
reflect_class = self.find_or_add(next_reflect_class)
if (ignore_or_swap == ignore_str):
                    assert ignore_swap not in reflect_class.ignored, "Reflection for %s repeats %s \"%s\"" % (next_reflect_class, ignore_or_swap, ignore_swap)
assert ignore_swap not in reflect_class.swapped, "Reflection for %s references field \"%s\" in %s and %s " % (next_reflect_class, ignore_swap, ignore_str, swap_str)
reflect_class.ignored.append(ignore_swap)
else:
                    assert ignore_swap not in reflect_class.swapped, "Reflection for %s repeats %s \"%s\"" % (next_reflect_class, ignore_or_swap, ignore_swap)
assert ignore_swap not in reflect_class.ignored, "Reflection for %s references field \"%s\" in %s and %s " % (next_reflect_class, ignore_swap, swap_str, ignore_str)
reflect_class.swapped.append(ignore_swap)
debug("ignore or swap %s --> %s, ignored count=%s, swapped count=%s" % (next_reflect_class, ignore_swap, len(reflect_class.ignored), len(reflect_class.swapped)))
current = next_reflect_ignores_swaps.find(ignore_swap_match.group(0), current) + len(ignore_swap_match.group(0))
else:
break
def add_field(self, reflect_class_name, field):
reflect_class = self.find_or_add(reflect_class_name)
assert field not in reflect_class.fields, "Reflection for %s repeats field \"%s\"" % (reflect_class_name, field)
reflect_class.fields.append(field)
debug("add_field %s --> %s" % (reflect_class_name, field))
def replace_multi_line_comment(match):
all=match.group(1)
all=EmptyScope.strip_extra_pattern.sub("", all)
debug("multiline found=%s" % (all))
match=EmptyScope.ignore_swap_pattern.search(all)
if match:
ignore_or_swap = match.group(1)
all = match.group(2)
debug("multiline %s now=%s" % (ignore_or_swap, all))
invalid_chars=EmptyScope.invalid_chars_pattern.search(all)
if invalid_chars:
for ic in invalid_chars.groups():
debug("invalid_char=%s" % (ic))
debug("WARNING: looks like \"%s\" is intending to %s, but there are invalid characters - \"%s\"" % (all, ignore_or_swap, ",".join(invalid_chars.groups())))
return ""
groups=re.findall(EmptyScope.multi_line_comment_ignore_swap_pattern, all)
if groups is None:
return ""
rtn_str="// %s " % (ignore_or_swap)
rtn_str+=', '.join([group for group in groups if group is not None])
debug("multiline rtn_str=%s" % (rtn_str))
return rtn_str
debug("multiline no match")
return ""
def replace_line_comment(match):
all=match.group(0)
debug("singleline found=%s" % (all))
if EmptyScope.single_comment_ignore_swap_pattern.match(all):
return all
else:
return "\n"
def validate_file(file):
f = open(file, "r")
contents = "\n" + f.read() # lazy fix for complex regex
f.close()
contents = EmptyScope.multi_line_comment_pattern.sub(replace_multi_line_comment, contents)
contents = EmptyScope.single_comment_pattern.sub(replace_line_comment, contents)
found = re.search(fc_reflect_str, contents)
if found is None:
return
print("validate %s" % (file))
debug("validate %s" % (file))
global_namespace=Namespace("", None, 0, contents, None)
global_namespace.read()
if args.debug:
_, filename = os.path.split(file)
with open(os.path.join(temp_dir, filename + ".struct"), "w") as f:
f.write("global_namespace=%s" % (global_namespace))
with open(os.path.join(temp_dir, filename + ".stripped"), "w") as f:
f.write(contents)
reflections=Reflections(contents)
reflections.read()
for reflection_name in reflections.classes:
reflection = reflections.classes[reflection_name]
class_struct = global_namespace.find_class(reflection_name)
if class_struct is None:
match=re.search(r'^(.+?)::id_type$', reflection_name)
if match:
parent_class_name = match.group(1)
parent_class = global_namespace.find_class(parent_class_name)
if parent_class.ignore_id:
# this is a chainbase::object, don't need to worry about id_type definition
continue
class_struct_num_fields = len(class_struct.fields) if class_struct is not None else None
debug("reflection_name=%s, class field count=%s, reflection field count=%s, ingore count=%s, swap count=%s" % (reflection_name, class_struct_num_fields, len(reflection.fields), len(reflection.ignored), len(reflection.swapped)))
assert isinstance(class_struct, ClassStruct), "could not find a %s/%s/%s for %s" % (EmptyScope.class_str, EmptyScope.struct_str, EmptyScope.enum_str, reflection_name)
if class_struct.ignore_id:
id_field = "id"
if id_field not in reflection.ignored and id_field not in reflection.fields:
debug("Object ignore_id Adding id to ignored for %s" % (reflection_name))
reflection.ignored.append(id_field)
else:
debug("Object ignore_id NOT adding id to ignored for %s" % (reflection_name))
rf_index = 0
rf_len = len(reflection.fields)
processed = []
back_swapped = []
fwd_swapped = []
ignored = []
f_index = 0
f_len = len(class_struct.fields)
while f_index < f_len:
field = class_struct.fields[f_index]
reflect_field = reflection.fields[rf_index] if rf_index < rf_len else None
processed.append(field)
debug("\nfield=%s reflect_field=%s" % (field, reflect_field))
if field in reflection.swapped:
debug("field \"%s\" swapped (back)" % (field))
reflection.swapped.remove(field)
back_swapped.append(field)
assert field in reflection.fields, "Reflection for %s indicates swapping %s but swapped position is not indicated in the reflection fields. Should it be ignored?" % (reflection_name, field)
assert reflect_field != field, "Reflection for %s should not indicate swapping %s since it is in the correct order" % (reflection_name, field)
f_index += 1
continue
if reflect_field in reflection.swapped:
debug("field \"%s\" swapped (fwd)" % (field))
reflection.swapped.remove(reflect_field)
fwd_swapped.append(reflect_field)
assert reflect_field in reflection.fields, "Reflection for %s indicates swapping field %s but it doesn't exist in that class/struct so it should be removed" % (reflection_name, reflect_field)
rf_index += 1
continue
assert reflect_field not in ignored, "Reflection for %s should not indicate %s for %s; it should indicate %s - %s" % (reflection_name, ignore_str, reflect_field, swap_str, ",".join(ignored))
if field in reflection.ignored:
debug("ignoring: %s" % (field))
reflection.ignored.remove(field)
ignored.append(field)
assert reflect_field != field, "Reflection for %s should not indicate ignoring %s since it is in the correct order" % (reflection_name, field)
f_index += 1
continue
debug("ignored=%s, swapped=%s" % (",".join(reflection.ignored),",".join(reflection.swapped)))
if reflect_field is not None and reflect_field in back_swapped:
back_swapped.remove(reflect_field)
rf_index += 1
elif field in fwd_swapped:
fwd_swapped.remove(field)
f_index += 1
else:
assert reflect_field == field, "Reflection for %s should have field %s instead of %s or else it should indicate if the field should be ignored (%s) or swapped (%s)" %(reflection_name, field, reflect_field, ignore_str, swap_str)
f_index += 1
rf_index += 1
debug("rf_index=%s, rf_len=%s, f_index=%s, f_len=%s" % (rf_index, rf_len, f_index, f_len))
assert len(reflection.ignored) == 0, "Reflection for %s has erroneous ignores - \"%s\"" % (reflection_name, ",".join(reflection.ignored))
unused_reflect_fields = []
while rf_index < rf_len:
debug("rf_index=%s, rf_len=%s fields=%s" % (rf_index, rf_len, ",".join(reflection.fields)))
reflect_field = reflection.fields[rf_index]
if reflect_field in back_swapped:
back_swapped.remove(reflect_field)
else:
unused_reflect_fields.append(reflect_field)
rf_index += 1
assert len(unused_reflect_fields) == 0, "Reflection for %s has fields not in definition for class/struct - \"%s\"" % (reflection_name, ",".join(unused_reflect_fields))
assert len(reflection.swapped) == 0, "Reflection for %s has erroneous swaps - \"%s\"" % (reflection_name, ",".join(reflection.swapped))
assert len(back_swapped) == 0, "Reflection for %s indicated swapped fields that were never provided - \"%s\"" % (reflection_name, ",".join(back_swapped))
assert len(fwd_swapped) == 0, "Reflection for %s indicated and provided swapped fields that are not in the class - \"%s\"" % (reflection_name, ",".join(fwd_swapped))
print("%s passed" % (file))
success = True
def walk(current_dir):
result = True
print("Searching for files: %s" % (current_dir))
for root, dirs, filenames in os.walk(current_dir):
for filename in filenames:
_, extension = os.path.splitext(filename)
if extension not in extensions:
continue
try:
validate_file(os.path.join(root, filename))
except AssertionError:
_, info, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
print("An error occurred in %s:%s: %s" % (filename, line, info), file=sys.stderr)
if args.exit_on_error:
exit(1)
result = False
if not recurse:
break
return result
for file in args.files:
if os.path.isdir(file):
success &= walk(file)
elif os.path.isfile(file):
try:
validate_file(file)
except AssertionError:
_, info, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
print("An error occurred in %s:%s: %s" % (filename, line, info), file=sys.stderr)
if args.exit_on_error:
exit(1)
success = False
else:
print("ERROR \"%s\" is neither a directory nor a file" % file)
success = False
if success:
exit(0)
else:
exit(1)
|
py | b40aa32d265ecfca16f20f6d220217d9ce1f1d00 | READY = 'READY'
SURVEILLANCE = 'SURVEILLANCE'
ALARM = 'ALARM'
states = {
READY: 1,
SURVEILLANCE: 2,
ALARM: 3
}
MOTION_DETECT_COUNT = 2
MOTION_DETECT_PERIOD = 15 #seconds
LED_PIN_OUTS = [21, 20, 16, 12]
MOTION_SENSOR_PIN = 19
LIGHT_SENSOR_PIN = 13
BUZZER_PIN = 26
DEFAULT_BTN_HOLD_TIME = 2 # Seconds
BUTTON_PIN = 2
|
py | b40aa347f71f23060925c23d22362eea350303d7 | from bs4 import BeautifulSoup
import requests
import csv
import sys
from urllib.error import HTTPError
sys.path.append("..")
import mytemp
import time
import json
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# https://www.linkedin.com/uas/login-submit
data={
'session_key':'[email protected]',
'session_password':'774334640a',
# session_redirect:/voyager/loginRedirect.html
}
# loginUrl='https://www.linkedin.com/uas/login-submit'
# response=requests.post(loginUrl,data)
# print(response.text)
# token=json.loads(response.text)['data']['token']
cook='bcookie="v=2&e6b7a84a-9195-4bef-8875-c129bafe2148"; _ga=GA1.2.847762353.1504453393; bscookie="v=1&201807261153213add1e42-b78f-4d7f-8ac6-bbc3898e2884AQFOkaVBDYFijzw6mAKZ9KzGdtdlQ0Bu"; _lipt=CwEAAAFmetGb8e8zfPHZXUKk8T5chfEm42WW2VDJxQBtHvI63jLipwt6jZ00MY26ex1y6lEUQE7iiZ3n7fwBOkmtifNHHaVGPFWAoVQkGrBdQZEYy3Uxr1qKiYDsGHqFUCne_DIqODbMZmgUllcFfUjCltSqLcCUoGCcfZfKNzHCmGHrICGv-e5YT_AkKnHxpQWpIMbqLBun7MkOrhCege-6UkLg5Ua1FgMNGJvarsk; _guid=9af84474-c269-4173-ad7c-8c3766235272; li_oatml=AQEnFngsmPWikAAAAWZ60ba6koy90ih2OSpHlxuAlurNTdu_h9Dy2pudGt5qd7NRXMSL0gZ-e3Wx_p5AHLwXez1Wp_-hekEw; visit="v=1&M"; lang="v=2&lang=zh-cn"; JSESSIONID="ajax:0439271021474059911"; leo_auth_token="GST:8iKDsYRKFGGkEzphVZ0G7YA0uYP_cfypNrLDhdTKUDDgEAhFonbQdt:1539678314:e225e4638fff38caac331ab2321e9efea0dc1281"; li_at=AQEDASPXDIUFK84pAAABZnv6oRoAAAFmoAclGlEANJSsupIwhpchZ5lUeBXhif0lSUMVJRV_XYcw2ZU9kxEmxS6cSTjZiRtnVs8Uby191URFPfcM2u4Cwq3qKV9aXo6axaSihiqpUCHUv05rOXPHAnab; liap=true; li_cc=AQHh_CVcMTIb4wAAAWZ7-8F7TmJ9H4KXdrFRYUQZsv2D1aoYyC3QxoIovTvSdphJ3YZ_dXwd1fLF; lidc="b=SGST00:g=6:u=1:i=1539678549:t=1539764949:s=AQF6BkkW6ia13YwX_TKYH9JId2uSMJ09"'
header={
'Cookie':cook,
# 'referer':'https://www.linkedin.com/mynetwork/invite-connect/connections/',
'csrf-token':'ajax:0439271021474059911',
}
# f1=open('linkedin.csv','a+',encoding='gb18030',newline='')
# csv_write=csv.writer(f1)
# for i in range(103,106):
# url='https://www.linkedin.com/voyager/api/relationships/connections?start='+str(i*40)+'&count=40&sortType=RECENTLY_ADDED'
# bsObj=requests.get(url,headers=header)
# ele=json.loads(bsObj.text)['elements']
# for e in ele:
# # print(e['miniProfile'])
# newUrl='https://www.linkedin.com/in/'+e['miniProfile']['publicIdentifier']+'/'
# print(newUrl)
# csv_write.writerow([newUrl])
# # print(bsObj)
# # # profileView='https://www.linkedin.com/voyager/api/identity/profiles/%E9%BB%98-%E5%BC%A0-778261149/profileView/'
# # break
# # break
f2=open('linkedin_final.csv','a+',encoding='gb18030',newline='')
csv_write=csv.writer(f2)
for line in csv.reader(open('linkedin.csv','r',encoding='gb18030')):
url=line[0]
print(url)
# url='https://www.linkedin.com/in/贵勇-游-67051615b/'
mainText=url.replace('https://www.linkedin.com/in/','')
newUrl='https://www.linkedin.com/voyager/api/identity/profiles/'+mainText+'profileView/'
req=requests.get(newUrl,headers=header)
js=json.loads(req.text)
# print(js)
    # Education background
if 'educationView' not in js:
continue
eduText=""
for edu in js['educationView']['elements']:
if 'schoolName' not in edu:
eduText=eduText+edu['school']['schoolName']+'_'
else:
eduText=eduText+edu['schoolName']+'_'
if 'degreeName' in edu:
eduText=eduText+edu['degreeName']+'_'
if 'fieldOfStudy' in edu:
eduText=eduText+edu['fieldOfStudy']+" "
if 'timePeriod' not in edu:
continue
if 'startDate' not in edu['timePeriod']:
continue
eduText=eduText+str(edu['timePeriod']['startDate']['year'])+'年'
if 'endDate' in edu['timePeriod']:
eduText=eduText+"——"+str(edu['timePeriod']['endDate']['year'])+'年'
eduText=eduText+'\n'
higherDegree=eduText.split('_')[0]
# print(eduText)
    # Work experience
workText=""
nowWork=''
for work in js['positionGroupView']['elements']:
# print(work)
# break
if 'title' in work['positions'][0]:
workText=workText+work['positions'][0]['title']+"_"
if 'name' in work:
workText=workText+work['name']+' '
if 'startDate' not in work['timePeriod']:
continue
workText=workText+str(work['timePeriod']['startDate']['year'])+'年'
if 'month' in work['timePeriod']['startDate']:
workText=workText+str(work['timePeriod']['startDate']['month'])+'月'
workText=workText+"——"
if 'endDate' in work['timePeriod']:
workText=workText+str(work['timePeriod']['endDate']['year'])+'年'
if 'month' in work['timePeriod']['endDate']:
workText=workText+"-"+str(work['timePeriod']['endDate']['month'])+'月'+' '
else:
workText=workText+' '
else:
if 'name' in work:
nowWork=nowWork+'_'+work['name']
workText=workText+"至今"+' '
if 'locationName' in work['positions'][0]:
workText=workText+work['positions'][0]['locationName']
workText=workText+"\n"
# print(workText)
    # Skill endorsements
skillText=''
# print(js['skillView']['elements'])
for skill in js['skillView']['elements']:
skillText=skillText+skill['name']+'_'
# print(skillText)
try:
summary=js['profile']['summary']
except:
summary=''
mini=js['profile']['miniProfile']
pname=''
if 'lastName' in mini:
pname=pname+mini['lastName']
pname=pname+mini['firstName']
if 'occupation' in mini:
occupation=mini['occupation']
else:
occupation=''
if 'locationName' in js['profile']:
city=js['profile']['locationName']
else:
city=''
row=[url,pname,occupation,city,nowWork,higherDegree,summary,workText,eduText,skillText]
csv_write.writerow(row)
# break
# Can be used to work around JS-rendered redirects whose data otherwise cannot be scraped
def phangetObj(url,data=None):
desire = DesiredCapabilities.PHANTOMJS.copy()
for key, value in header.items():
desire['phantomjs.page.customHeaders.{}'.format(key)] = value
    driver = webdriver.PhantomJS(desired_capabilities=desire, executable_path="phantomjs.exe", service_args=['--load-images=no'])  # set load-images to 'no' to keep the browser from loading images
# driver =webdriver.PhantomJS()
    # Use the browser to request the page
driver.get(url)
# driver.add_cookie(header)
    # Sleep for 3 seconds to wait for all data to finish loading
time.sleep(3)
    # Locate elements by id;
    # .text returns the element's text content
# dl=driver.find_elements_by_css_selector("#searchLeftOptions")
# dl=driver.find_element_by_id("bpr-guid-1537650").get_text()
# print(dl)
pageSource=driver.page_source
print(pageSource)
# bsObj=BeautifulSoup(pageSource,"html.parser")
# education=bsObj.find('code',{'id':'bpr-guid-1537650'}).get_text()
# data=json.loads(education)['data']
# print(data)
driver.close()
return 0
# phangetObj(newUrl)
|
py | b40aa3c511222602b87f82447cd259f27cdd1439 | from constants import \
SUIT_TO_STRING, \
VAL_TO_STRING, \
LETTER_TO_POSITION_KEY, \
PLAYER_TO_TEAM
def _in_cards(find, hand):
for idx, card in enumerate(hand):
if find['suit'] == card['suit'] and find['card']['val'] == card['card']['val']:
return idx
return -1
def _round_over(gamestate):
    # The round is over only once every player has played all of their cards.
    for player in gamestate['players'].keys():
        if len(gamestate['players'][player]['cards']) != 0:
            return False
    return True
def _trick_over(gamestate):
if len(gamestate['trick']['current_play']) == 4:
return True
else:
return False
def _rank_hand(gamestate, position):
player = gamestate['trick']['current_play'][LETTER_TO_POSITION_KEY[position.upper()]]
if len(player['cards']) == 2:
if player['cards'][0]['value']['rank'] == player['cards'][1]['value']['rank'] \
and player['cards'][0]['suit'] == player['cards'][1]['suit']:
player['rank'] = 2
else:
player['rank'] = 1
else:
player['rank'] = 1
def _trick_winner(gamestate):
pass
return "N"
def _points_in_current_play(gamestate):
total = 0
for player in gamestate['players'].keys():
for card in gamestate['trick']['current_play'][player]['cards']:
total += card['value']['points']
return total
def _assign_points(gamestate):
[_rank_hand(gamestate, position) for position in gamestate['players'].keys()]
winner = _trick_winner(gamestate)
gamestate['trick']['starter'] = winner
if gamestate['defender'] == PLAYER_TO_TEAM[winner]:
gamestate['points'] += _points_in_current_play(gamestate)
gamestate['trick']['current_play'] = []
def _card_to_string(card):
return SUIT_TO_STRING[card['suit']] + VAL_TO_STRING[card['value']['rank']]
def _follow_suit(gamestate, position):
num_suit = 0
for card in gamestate['players'][LETTER_TO_POSITION_KEY[position.upper()]]['cards']:
if card['suit'] == gamestate['trick']['suit']:
num_suit += 1
return num_suit
def _check_suits(gamestate, position, cards):
suit = cards[0]['suit']
if gamestate['players'][position]['leading']:
gamestate['trick']['suit'] = suit
num_follow = _follow_suit(gamestate, position)
num_dont_follow = len(cards) - num_follow
for card in cards:
if card['suit'] != suit:
if num_dont_follow == 0:
return {'error': True,
'message': "Different suits within play/Too many different suited cards"}
else:
num_dont_follow -= 1
if card['suit'] != gamestate['trick']['suit']:
if num_follow > 0:
return {'error': True,
'message': "Different suit from leading suit/Too many different non-leading suited cards"}
else:
num_follow -= 1
return {'error': False,
'message': "All suits match"}
def _check_style(gamestate, cards):
# if len(cards) == 6:
# if cards[0]['val'] == cards[1]['val'] and cards[2]['val'] == cards[3]['val'] \
# and cards[4]['val'] == cards[5]['val'] and cards[1]['val'] + 1 == cards[2]['val'] \
# and cards[3]['val'] + 1 == cards[4]['val']:
# return {'error': False,
# 'message': "Three consecutive pairs played"}
# else:
# return {'error': True,
# 'message': "Invalid play with six cards"}
# elif len(cards) == 4:
# if cards[0]['val'] == cards[1]['val'] and cards[2]['val'] == cards[3]['val'] \
# and cards[1]['val'] + 1 == cards[2]['val']:
# return {'error': False,
# 'message': "Two consecutive pairs played"}
# else:
# return {'error': True,
# 'message': "Invalid play with four cards"}
if len(cards) == 2:
if cards[0]['value']['rank'] == cards[1]['value']['rank'] and cards[0]['suit'] == cards[1]['suit']:
return {'error': False,
'message': "Pair played"}
else:
return {'error': True,
'message': "Invalid play with two cards"}
else:
return {'error': False,
'message': "Single played"}
def _check_valid(gamestate, position, cards):
sorted_cards = sorted(cards, key=lambda k: k['value']['rank'])
valid_suits = _check_suits(gamestate, position, sorted_cards)
if valid_suits['error']:
return valid_suits
valid_style = _check_style(gamestate, sorted_cards)
return valid_style
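# Illustrative sketch (not part of the original module): a minimal check of _check_style,
# assuming cards follow the {'suit': ..., 'value': {'rank': ...}} shape it reads; the suit
# values below are placeholders rather than values taken from constants.py.
def _example_check_style():
    pair = [{'suit': 'H', 'value': {'rank': 7}},
            {'suit': 'H', 'value': {'rank': 7}}]
    single = [{'suit': 'S', 'value': {'rank': 11}}]
    assert _check_style(None, pair) == {'error': False, 'message': "Pair played"}
    assert _check_style(None, single) == {'error': False, 'message': "Single played"}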
|
py | b40aa5469c047dbc6e1c458abef8bf3625f2562c | import os
import requests
import requests.auth
import urllib
def make_authorization_url(
REDIRECT_URI=os.environ['REDIRECT_URI'],
CLIENT_ID=os.environ['PANOPTES_CLIENT_ID'],
BASE_URL='https://panoptes.zooniverse.org/oauth/authorize'):
params = {"client_id": CLIENT_ID,
"response_type" : "code",
"redirect_uri" : REDIRECT_URI,
"scope" : 'collection+public',
}
return BASE_URL + '?' + urllib.unquote_plus(urllib.urlencode(params))
def get_token(code,
REDIRECT_URI=os.environ['REDIRECT_URI'],
CLIENT_ID=os.environ['PANOPTES_CLIENT_ID'],
CLIENT_SECRET=os.environ['PANOPTES_CLIENT_SECRET']):
#client_auth = requests.auth.HTTPBasicAuth(CLIENT_ID, CLIENT_SECRET)
post_data = {"grant_type": "authorization_code",
"code": code,
"redirect_uri": REDIRECT_URI,
"client_id" : CLIENT_ID,
"client_secret" : CLIENT_SECRET,
}
response = requests.post("https://panoptes.zooniverse.org/oauth/token",
data=post_data)
token_json = response.json()
return token_json["access_token"], token_json["expires_in"]
def get_username(access_token):
headers = {'Accept': 'application/vnd.api+json; version=1',
'Content-Type': 'application/json',
"Authorization": "Bearer " + str(access_token)}
response = requests.get("https://panoptes.zooniverse.org/api/me", headers=headers)
if response.ok:
me_json = response.json()
return me_json['users'][0]['login']
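# Illustrative sketch (not part of the original module): the intended flow is to send the
# user to make_authorization_url(), let Panoptes redirect back with a ?code=... parameter,
# and then exchange that code for a token and look up the login name. 'code_from_redirect'
# is a placeholder supplied by whatever web framework handles the redirect.
def _example_oauth_flow(code_from_redirect):
    access_token, expires_in = get_token(code_from_redirect)
    username = get_username(access_token)
    return username, expires_in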
|
py | b40aa563ee7a201dc7b81d2741d0f1549c957cfc | import pickle
from dbt.contracts.graph.compiled import (
CompiledModelNode, InjectedCTE, CompiledSchemaTestNode
)
from dbt.contracts.graph.parsed import (
DependsOn, NodeConfig, TestConfig, TestMetadata
)
from dbt.node_types import NodeType
from .utils import ContractTestCase
class TestCompiledModelNode(ContractTestCase):
ContractType = CompiledModelNode
def _minimum(self):
return {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'bar',
'compiled': False,
}
def test_basic_uncompiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'deferred': False,
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
},
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': False,
'extra_ctes': [],
'extra_ctes_injected': False,
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Model,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
deferred=False,
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=NodeConfig(),
meta={},
compiled=False,
extra_ctes=[],
extra_ctes_injected=False,
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertTrue(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
minimum = self._minimum()
self.assert_from_dict(node, minimum)
pickle.loads(pickle.dumps(node))
def test_basic_compiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from {{ ref("other") }}',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'deferred': True,
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
},
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': True,
'compiled_sql': 'select * from whatever',
'extra_ctes': [{'id': 'whatever', 'sql': 'select * from other'}],
'extra_ctes_injected': True,
'injected_sql': 'with whatever as (select * from other) select * from whatever',
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from {{ ref("other") }}',
name='foo',
resource_type=NodeType.Model,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
deferred=True,
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=NodeConfig(),
meta={},
compiled=True,
compiled_sql='select * from whatever',
extra_ctes=[InjectedCTE('whatever', 'select * from other')],
extra_ctes_injected=True,
injected_sql='with whatever as (select * from other) select * from whatever',
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertTrue(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
def test_invalid_extra_fields(self):
bad_extra = self._minimum()
bad_extra['notvalid'] = 'nope'
self.assert_fails_validation(bad_extra)
def test_invalid_bad_type(self):
bad_type = self._minimum()
bad_type['resource_type'] = str(NodeType.Macro)
self.assert_fails_validation(bad_type)
class TestCompiledSchemaTestNode(ContractTestCase):
ContractType = CompiledSchemaTestNode
def _minimum(self):
return {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'bar',
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
'compiled': False,
}
def test_basic_uncompiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
'severity': 'ERROR',
},
'deferred': False,
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': False,
'extra_ctes': [],
'extra_ctes_injected': False,
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Test,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
deferred=False,
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=TestConfig(),
meta={},
compiled=False,
extra_ctes=[],
extra_ctes_injected=False,
test_metadata=TestMetadata(namespace=None, name='foo', kwargs={}),
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertFalse(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
minimum = self._minimum()
self.assert_from_dict(node, minimum)
pickle.loads(pickle.dumps(node))
def test_basic_compiled(self):
node_dict = {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from {{ ref("other") }}',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'deferred': False,
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'vars': {},
'severity': 'warn',
},
'docs': {'show': True},
'columns': {},
'meta': {},
'compiled': True,
'compiled_sql': 'select * from whatever',
'extra_ctes': [{'id': 'whatever', 'sql': 'select * from other'}],
'extra_ctes_injected': True,
'injected_sql': 'with whatever as (select * from other) select * from whatever',
'column_name': 'id',
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
}
node = self.ContractType(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from {{ ref("other") }}',
name='foo',
resource_type=NodeType.Test,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
deferred=False,
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=TestConfig(severity='warn'),
meta={},
compiled=True,
compiled_sql='select * from whatever',
extra_ctes=[InjectedCTE('whatever', 'select * from other')],
extra_ctes_injected=True,
injected_sql='with whatever as (select * from other) select * from whatever',
column_name='id',
test_metadata=TestMetadata(namespace=None, name='foo', kwargs={}),
)
self.assert_symmetric(node, node_dict)
self.assertFalse(node.empty)
self.assertFalse(node.is_refable)
self.assertFalse(node.is_ephemeral)
self.assertEqual(node.local_vars(), {})
def test_invalid_extra_fields(self):
bad_extra = self._minimum()
bad_extra['extra'] = 'extra value'
self.assert_fails_validation(bad_extra)
def test_invalid_resource_type(self):
bad_type = self._minimum()
bad_type['resource_type'] = str(NodeType.Model)
self.assert_fails_validation(bad_type)
|
py | b40aa5c7a0dcb4d298c85207640e7b380255473a | # Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as logging
from oslo_utils import strutils
import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova import utils
from nova.virt import driver
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
'ephemeral0': 'sda2',
'root': DEFAULT_ROOT_DEV_NAME,
'swap': 'sda3'}
bdm_legacy_fields = set(['device_name', 'delete_on_termination',
'virtual_name', 'snapshot_id',
'volume_id', 'volume_size', 'no_device',
'connection_info'])
bdm_new_fields = set(['source_type', 'destination_type',
'guest_format', 'device_type', 'disk_bus', 'boot_index',
'device_name', 'delete_on_termination', 'snapshot_id',
'volume_id', 'volume_size', 'image_id', 'no_device',
'connection_info'])
bdm_db_only_fields = set(['id', 'instance_uuid'])
bdm_db_inherited_fields = set(['created_at', 'updated_at',
'deleted_at', 'deleted'])
bdm_new_non_api_fields = set(['volume_id', 'snapshot_id',
'image_id', 'connection_info'])
bdm_new_api_only_fields = set(['uuid'])
bdm_new_api_fields = ((bdm_new_fields - bdm_new_non_api_fields) |
bdm_new_api_only_fields)
class BlockDeviceDict(dict):
"""Represents a Block Device Mapping in Nova."""
_fields = bdm_new_fields
_db_only_fields = (bdm_db_only_fields |
bdm_db_inherited_fields)
_required_fields = set(['source_type'])
def __init__(self, bdm_dict=None, do_not_default=None, **kwargs):
super(BlockDeviceDict, self).__init__()
bdm_dict = bdm_dict or {}
bdm_dict.update(kwargs)
do_not_default = do_not_default or set()
self._validate(bdm_dict)
if bdm_dict.get('device_name'):
bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name'])
bdm_dict['delete_on_termination'] = bool(
bdm_dict.get('delete_on_termination'))
# NOTE (ndipanov): Never default db fields
self.update({field: None for field in self._fields - do_not_default})
self.update(list(six.iteritems(bdm_dict)))
def _validate(self, bdm_dict):
"""Basic data format validations."""
dict_fields = set(key for key, _ in six.iteritems(bdm_dict))
# Check that there are no bogus fields
if not (dict_fields <=
(self._fields | self._db_only_fields)):
raise exception.InvalidBDMFormat(
details=_("Some fields are invalid."))
if bdm_dict.get('no_device'):
return
# Check that all required fields are there
if (self._required_fields and
not ((dict_fields & self._required_fields) ==
self._required_fields)):
raise exception.InvalidBDMFormat(
details=_("Some required fields are missing"))
if 'delete_on_termination' in bdm_dict:
bdm_dict['delete_on_termination'] = strutils.bool_from_string(
bdm_dict['delete_on_termination'])
if bdm_dict.get('device_name') is not None:
validate_device_name(bdm_dict['device_name'])
validate_and_default_volume_size(bdm_dict)
if bdm_dict.get('boot_index'):
try:
bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
except ValueError:
raise exception.InvalidBDMFormat(
details=_("Boot index is invalid."))
@classmethod
def from_legacy(cls, legacy_bdm):
copy_over_fields = bdm_legacy_fields & bdm_new_fields
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
# NOTE (ndipanov): These fields cannot be computed
# from legacy bdm, so do not default them
# to avoid overwriting meaningful values in the db
non_computable_fields = set(['boot_index', 'disk_bus',
'guest_format', 'device_type'])
new_bdm = {fld: val for fld, val in six.iteritems(legacy_bdm)
if fld in copy_over_fields}
virt_name = legacy_bdm.get('virtual_name')
if is_swap_or_ephemeral(virt_name):
new_bdm['source_type'] = 'blank'
new_bdm['delete_on_termination'] = True
new_bdm['destination_type'] = 'local'
if virt_name == 'swap':
new_bdm['guest_format'] = 'swap'
else:
new_bdm['guest_format'] = CONF.default_ephemeral_format
elif legacy_bdm.get('snapshot_id'):
new_bdm['source_type'] = 'snapshot'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('volume_id'):
new_bdm['source_type'] = 'volume'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('no_device'):
# NOTE (ndipanov): Just keep the BDM for now,
pass
else:
raise exception.InvalidBDMFormat(
details=_("Unrecognized legacy format."))
return cls(new_bdm, non_computable_fields)
@classmethod
def from_api(cls, api_dict, image_uuid_specified):
"""Transform the API format of data to the internally used one.
Only validate if the source_type field makes sense.
"""
if not api_dict.get('no_device'):
source_type = api_dict.get('source_type')
device_uuid = api_dict.get('uuid')
destination_type = api_dict.get('destination_type')
if source_type not in ('volume', 'image', 'snapshot', 'blank'):
raise exception.InvalidBDMFormat(
details=_("Invalid source_type field."))
elif source_type == 'blank' and device_uuid:
raise exception.InvalidBDMFormat(
details=_("Invalid device UUID."))
elif source_type != 'blank':
if not device_uuid:
raise exception.InvalidBDMFormat(
details=_("Missing device UUID."))
api_dict[source_type + '_id'] = device_uuid
if source_type == 'image' and destination_type == 'local':
try:
boot_index = int(api_dict.get('boot_index', -1))
except ValueError:
raise exception.InvalidBDMFormat(
details=_("Boot index is invalid."))
# if this bdm is generated from --image ,then
# source_type = image and destination_type = local is allowed
if not (image_uuid_specified and boot_index == 0):
raise exception.InvalidBDMFormat(
details=_("Mapping image to local is not supported."))
api_dict.pop('uuid', None)
return cls(api_dict)
def legacy(self):
copy_over_fields = bdm_legacy_fields - set(['virtual_name'])
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
legacy_block_device = {field: self.get(field)
for field in copy_over_fields if field in self}
source_type = self.get('source_type')
destination_type = self.get('destination_type')
no_device = self.get('no_device')
if source_type == 'blank':
if self['guest_format'] == 'swap':
legacy_block_device['virtual_name'] = 'swap'
else:
# NOTE (ndipanov): Always label as 0, it is up to
# the calling routine to re-enumerate them
legacy_block_device['virtual_name'] = 'ephemeral0'
elif source_type in ('volume', 'snapshot') or no_device:
legacy_block_device['virtual_name'] = None
elif source_type == 'image':
if destination_type != 'volume':
# NOTE(ndipanov): Image bdms with local destination
# have no meaning in the legacy format - raise
raise exception.InvalidBDMForLegacy()
legacy_block_device['virtual_name'] = None
return legacy_block_device
def get_image_mapping(self):
drop_fields = (set(['connection_info']) |
self._db_only_fields)
mapping_dict = dict(self)
for fld in drop_fields:
mapping_dict.pop(fld, None)
return mapping_dict
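# Illustrative sketch (not part of the upstream module): how a legacy swap mapping is
# translated by BlockDeviceDict.from_legacy(); the field values are placeholders.
def _example_from_legacy_usage():
    legacy = {'device_name': '/dev/sdb', 'virtual_name': 'swap',
              'delete_on_termination': True, 'volume_size': 1}
    bdm = BlockDeviceDict.from_legacy(legacy)
    assert bdm['source_type'] == 'blank'
    assert bdm['destination_type'] == 'local'
    assert bdm['guest_format'] == 'swap'
    return bdm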
def is_safe_for_update(block_device_dict):
"""Determine if passed dict is a safe subset for update.
Safe subset in this case means a safe subset of both legacy
and new versions of data, that can be passed to an UPDATE query
without any transformation.
"""
fields = set(block_device_dict.keys())
return fields <= (bdm_new_fields |
bdm_db_inherited_fields |
bdm_db_only_fields)
def create_image_bdm(image_ref, boot_index=0):
"""Create a block device dict based on the image_ref.
This is useful in the API layer to keep the compatibility
with having an image_ref as a field in the instance requests
"""
return BlockDeviceDict(
{'source_type': 'image',
'image_id': image_ref,
'delete_on_termination': True,
'boot_index': boot_index,
'device_type': 'disk',
'destination_type': 'local'})
def create_blank_bdm(size, guest_format=None):
return BlockDeviceDict(
{'source_type': 'blank',
'delete_on_termination': True,
'device_type': 'disk',
'boot_index': -1,
'destination_type': 'local',
'guest_format': guest_format,
'volume_size': size})
def snapshot_from_bdm(snapshot_id, template):
"""Create a basic volume snapshot BDM from a given template bdm."""
copy_from_template = ('disk_bus', 'device_type', 'boot_index',
'delete_on_termination', 'volume_size',
'device_name')
snapshot_dict = {'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': snapshot_id}
for key in copy_from_template:
snapshot_dict[key] = template.get(key)
return BlockDeviceDict(snapshot_dict)
def legacy_mapping(block_device_mapping):
"""Transform a list of block devices of an instance back to the
legacy data format.
"""
legacy_block_device_mapping = []
for bdm in block_device_mapping:
try:
legacy_block_device = BlockDeviceDict(bdm).legacy()
except exception.InvalidBDMForLegacy:
continue
legacy_block_device_mapping.append(legacy_block_device)
# Re-enumerate the ephemeral devices
for i, dev in enumerate(dev for dev in legacy_block_device_mapping
if dev['virtual_name'] and
is_ephemeral(dev['virtual_name'])):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
return legacy_block_device_mapping
def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
root_device_name=None, no_root=False):
"""Transform a legacy list of block devices to the new data format."""
new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm)
for legacy_bdm in legacy_block_device_mapping]
# NOTE (ndipanov): We will not decide which device is root here - we assume
# that it will be supplied later. This is useful for having the root device
# as part of the image defined mappings that are already in the v2 format.
if no_root:
for bdm in new_bdms:
bdm['boot_index'] = -1
return new_bdms
image_bdm = None
volume_backed = False
# Try to assign boot_device
if not root_device_name and not image_uuid:
# NOTE (ndipanov): If there is no root_device, pick the first non
# blank one.
non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank']
if non_blank:
non_blank[0]['boot_index'] = 0
else:
for bdm in new_bdms:
if (bdm['source_type'] in ('volume', 'snapshot', 'image') and
root_device_name is not None and
(strip_dev(bdm.get('device_name')) ==
strip_dev(root_device_name))):
bdm['boot_index'] = 0
volume_backed = True
elif not bdm['no_device']:
bdm['boot_index'] = -1
else:
bdm['boot_index'] = None
if not volume_backed and image_uuid:
image_bdm = create_image_bdm(image_uuid, boot_index=0)
return ([image_bdm] if image_bdm else []) + new_bdms
def properties_root_device_name(properties):
"""get root device name from image meta data.
If it isn't specified, return None.
"""
root_device_name = None
# NOTE(yamahata): see image_service.s3.s3create()
for bdm in properties.get('mappings', []):
if bdm['virtual'] == 'root':
root_device_name = bdm['device']
# NOTE(yamahata): register_image's command line can override
# <machine>.manifest.xml
if 'root_device_name' in properties:
root_device_name = properties['root_device_name']
return root_device_name
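# Illustrative sketch (not part of the upstream module): image metadata 'mappings' entries
# use the EC2-style {'virtual': ..., 'device': ...} shape read by the function above.
def _example_properties_root_device_name():
    props = {'mappings': [{'virtual': 'root', 'device': '/dev/sda1'}]}
    assert properties_root_device_name(props) == '/dev/sda1'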
def validate_device_name(value):
try:
# NOTE (ndipanov): Do not allow empty device names
# until assigning default values
# is supported by nova.compute
utils.check_string_length(value, 'Device name',
min_length=1, max_length=255)
except exception.InvalidInput:
raise exception.InvalidBDMFormat(
details=_("Device name empty or too long."))
if ' ' in value:
raise exception.InvalidBDMFormat(
details=_("Device name contains spaces."))
def validate_and_default_volume_size(bdm):
if bdm.get('volume_size'):
try:
bdm['volume_size'] = utils.validate_integer(
bdm['volume_size'], 'volume_size', min_value=0)
except exception.InvalidInput:
# NOTE: We can remove this validation code after removing
# Nova v2.0 API code because v2.1 API validates this case
# already at its REST API layer.
raise exception.InvalidBDMFormat(
details=_("Invalid volume_size."))
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')
def is_ephemeral(device_name):
return _ephemeral.match(device_name) is not None
def ephemeral_num(ephemeral_name):
assert is_ephemeral(ephemeral_name)
return int(_ephemeral.sub('\\1', ephemeral_name))
def is_swap_or_ephemeral(device_name):
return (device_name and
(device_name == 'swap' or is_ephemeral(device_name)))
def new_format_is_swap(bdm):
if (bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local' and
bdm.get('guest_format') == 'swap'):
return True
return False
def new_format_is_ephemeral(bdm):
if (bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local' and
bdm.get('guest_format') != 'swap'):
return True
return False
def get_root_bdm(bdms):
try:
return next(bdm for bdm in bdms if bdm.get('boot_index', -1) == 0)
except StopIteration:
return None
def get_bdms_to_connect(bdms, exclude_root_mapping=False):
"""Will return non-root mappings, when exclude_root_mapping is true.
Otherwise all mappings will be returned.
"""
return (bdm for bdm in bdms if bdm.get('boot_index', -1) != 0 or
not exclude_root_mapping)
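# Illustrative sketch (not part of the upstream module): the root mapping is the entry with
# boot_index 0, and get_bdms_to_connect skips it when exclude_root_mapping is True.
def _example_root_bdm_helpers():
    bdms = [{'boot_index': 0, 'device_name': '/dev/vda'},
            {'boot_index': -1, 'device_name': '/dev/vdb'}]
    assert get_root_bdm(bdms)['device_name'] == '/dev/vda'
    assert [b['device_name'] for b in get_bdms_to_connect(bdms, True)] == ['/dev/vdb']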
def mappings_prepend_dev(mappings):
"""Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
for m in mappings:
virtual = m['virtual']
if (is_swap_or_ephemeral(virtual) and
(not m['device'].startswith('/'))):
m['device'] = '/dev/' + m['device']
return mappings
_dev = re.compile('^/dev/')
def strip_dev(device_name):
"""remove leading '/dev/'."""
return _dev.sub('', device_name) if device_name else device_name
def prepend_dev(device_name):
"""Make sure there is a leading '/dev/'."""
return device_name and '/dev/' + strip_dev(device_name)
_pref = re.compile('^((x?v|s|h)d)')
def strip_prefix(device_name):
"""remove both leading /dev/ and xvd or sd or vd or hd."""
device_name = strip_dev(device_name)
return _pref.sub('', device_name) if device_name else device_name
_nums = re.compile(r'\d+')
def get_device_letter(device_name):
letter = strip_prefix(device_name)
# NOTE(vish): delete numbers in case we have something like
# /dev/sda1
return _nums.sub('', letter) if device_name else device_name
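# Illustrative sketch (not part of the upstream module): how the device-name helpers above
# compose for a typical Linux device path.
def _example_device_name_helpers():
    assert strip_dev('/dev/sda1') == 'sda1'
    assert prepend_dev('sda1') == '/dev/sda1'
    assert strip_prefix('/dev/sda1') == 'a1'
    assert get_device_letter('/dev/sda1') == 'a'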
def instance_block_mapping(instance, bdms):
root_device_name = instance['root_device_name']
# NOTE(clayg): remove this when xenapi is setting default_root_device
if root_device_name is None:
if driver.is_xenapi():
root_device_name = '/dev/xvda'
else:
return _DEFAULT_MAPPINGS
mappings = {}
mappings['ami'] = strip_dev(root_device_name)
mappings['root'] = root_device_name
default_ephemeral_device = instance.get('default_ephemeral_device')
if default_ephemeral_device:
mappings['ephemeral0'] = default_ephemeral_device
default_swap_device = instance.get('default_swap_device')
if default_swap_device:
mappings['swap'] = default_swap_device
ebs_devices = []
blanks = []
# 'ephemeralN', 'swap' and ebs
for bdm in bdms:
# ebs volume case
if bdm.destination_type == 'volume':
ebs_devices.append(bdm.device_name)
continue
if bdm.source_type == 'blank':
blanks.append(bdm)
# NOTE(yamahata): I'm not sure how ebs device should be numbered.
# Right now sort by device name for deterministic
# result.
if ebs_devices:
ebs_devices.sort()
for nebs, ebs in enumerate(ebs_devices):
mappings['ebs%d' % nebs] = ebs
swap = [bdm for bdm in blanks if bdm.guest_format == 'swap']
if swap:
mappings['swap'] = swap.pop().device_name
ephemerals = [bdm for bdm in blanks if bdm.guest_format != 'swap']
if ephemerals:
for num, eph in enumerate(ephemerals):
mappings['ephemeral%d' % num] = eph.device_name
return mappings
def match_device(device):
"""Matches device name and returns prefix, suffix."""
match = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
if not match:
return None
return match.groups()
def volume_in_mapping(mount_device, block_device_info):
block_device_list = [strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(strip_dev(swap['device_name']))
block_device_list += [strip_dev(ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(
block_device_info)]
LOG.debug("block_device_list %s", sorted(filter(None, block_device_list)))
return strip_dev(mount_device) in block_device_list
def get_bdm_ephemeral_disk_size(block_device_mappings):
return sum(bdm.get('volume_size', 0)
for bdm in block_device_mappings
if new_format_is_ephemeral(bdm))
def get_bdm_swap_list(block_device_mappings):
return [bdm for bdm in block_device_mappings
if new_format_is_swap(bdm)]
def get_bdm_local_disk_num(block_device_mappings):
return len([bdm for bdm in block_device_mappings
if bdm.get('destination_type') == 'local'])
|
py | b40aa61560fe0b2da0258cd5c58a063413bf6c3e | #!/usr/bin/env python3
from datetime import datetime
import logging
from pathlib import Path
import sys
import subprocess as sub
"""
Author: herealways
Date: 2020.5.10
This script is responsible for game data's backup.
Usage: ./backup_server.py <directory_stores_backup>
The backup is only performed when the server is running.
Daily backup is executed every day. It only backs up the conf files and world data.
Weekly backup is executed every Friday. It backs up all the game files, including the server binary.
Daily backups that are more than 6 days old will be deleted,
while weekly backups that are more than 27 days old will be deleted.
The log file is stored at <directory_stores_backup>/log_file
"""
def check_game_running():
p = sub.run('docker ps | grep bedrock-server', shell=True,
stdout=sub.DEVNULL, stderr=sub.DEVNULL)
if p.returncode != 0:
        logging.info('The server is not running and backup will not be performed')
sys.exit(1)
logging.debug('Stopping the server before backup...')
sub.run('docker stop bedrock-server', shell=True,
stdout=sub.DEVNULL, stderr=sub.DEVNULL)
def daily_backup(backup_dir):
logging.debug('Starting daily backup...')
command = f'docker run --rm --volumes-from bedrock-server \
-v {backup_dir}/daily_backup:/backup \
ubuntu:18.04 bash -c "tar czf /backup/$(date +%F)_world_backup.tar.gz \
/bedrock-server/{{permissions.json,server.properties,whitelist.json,worlds}}"'
p = sub.run(command, shell=True, stdout=sub.DEVNULL, stderr=sub.DEVNULL)
if p.returncode != 0:
logging.error('Daily backup failed somehow!')
else:
logging.info('Daily backup succeeded.')
def weekly_backup(backup_dir):
# Only performing weekly backup on Friday.
if datetime.now().strftime('%u') != '5':
return
command = f'docker run --rm --volumes-from bedrock-server \
-v {backup_dir}/weekly_backup:/backup \
ubuntu:18.04 bash -c "tar czf /backup/$(date +%F)_full_backup.tar.gz \
/bedrock-server"'
p = sub.run(command, shell=True, stdout=sub.DEVNULL, stderr=sub.DEVNULL)
if p.returncode != 0:
logging.error('Weekly backup failed somehow!')
else:
logging.info('Weekly backup succeeded.')
def restart_server():
sub.run('docker start bedrock-server', shell=True,
stderr=sub.DEVNULL, stdout=sub.DEVNULL)
def remove_stale_backup(backup_dir):
daily_backup_dir = backup_dir / 'daily_backup/'
weekly_backup_dir = backup_dir / 'weekly_backup/'
daily_backup_files = list(daily_backup_dir.glob('*.tar.gz'))
weekly_backup_files = list(weekly_backup_dir.glob('*.tar.gz'))
now = datetime.now()
    # Delete daily backup files that are more than 6 days old
for f in daily_backup_files:
if (now - datetime.fromtimestamp(f.stat().st_mtime)).days > 6:
f.unlink()
    # Delete weekly backup files that are more than 27 days old
for f in weekly_backup_files:
if (now - datetime.fromtimestamp(f.stat().st_mtime)).days > 27:
f.unlink()
if __name__ == "__main__":
backup_dir = Path(sys.argv[1])
log_file = backup_dir / 'backup_log'
logging.basicConfig(level=logging.INFO, filename=log_file,
format='%(asctime)s - %(message)s')
check_game_running()
daily_backup(backup_dir)
weekly_backup(backup_dir)
restart_server()
remove_stale_backup(backup_dir)
|
py | b40aa655b1d9a9b27a1cf8e82c4d07f0a3bfdc1c | #!/usr/bin/env python3
import logging
import sys
import unittest
import numpy as np
import orbitx.orbitx_pb2 as protos
from orbitx.physics import calc, ode_solver
from orbitx import common
from orbitx import logs
from orbitx import network
from orbitx import physics
from orbitx.data_structures import (
EngineeringState, _EntityView, Entity, PhysicsState,
_N_COMPONENTS, _N_COOLANT_LOOPS, _N_RADIATORS, ComponentCoolantCnxn
)
from orbitx.strings import HABITAT
log = logging.getLogger()
class PhysicsEngine:
"""Ensures that the simthread is always shut down on test exit/failure."""
def __init__(self, savefile):
self.physics_engine = physics.PhysicsEngine(
common.load_savefile(common.savefile(savefile)))
def __enter__(self):
return self.physics_engine
def __exit__(self, *args):
self.physics_engine._stop_simthread()
class PhysicsEngineTestCase(unittest.TestCase):
"""Test the motion of the simulated system is correct."""
def test_simple_collision(self):
"""Test elastic collisions of two small-mass objects colliding."""
with PhysicsEngine('tests/simple-collision.json') as physics_engine:
# In this case, the first entity is standing still and the second
# on a collision course going left to right. The two should bounce.
# Entity 0 has r=50 and everything else 0.
# Entity 2 has r=30, x=-500, vx=10, and everything else 0.
# There's also entity 1, which is far away and shouldn't interact.
# Let's do some math oh hey, they should collide at t=42.
approach = physics_engine.get_state(41)
bounced = physics_engine.get_state(43)
self.assertTrue(approach[0].x > approach[2].x)
self.assertTrue(approach[2].vx > 0)
self.assertTrue(bounced[0].x > bounced[2].x)
self.assertTrue(bounced[2].vx < 0)
self.assertEqual(
round(approach[1].vy),
round(bounced[1].vy))
def test_basic_movement(self):
"""Test that a moving object changes its position."""
with PhysicsEngine('tests/only-sun.json') as physics_engine:
# In this case, the only entity is the Sun. It starts at (0, 0)
# with a speed of (1, -1). It should move.
t_delta = 100
initial = physics_engine.get_state(0)
moved = physics_engine.get_state(100)
self.assertEqual(initial.timestamp, 0)
self.assertAlmostEqual(initial[0].x, 0)
self.assertAlmostEqual(initial[0].y, 0)
self.assertAlmostEqual(initial[0].vx, 1)
self.assertAlmostEqual(initial[0].vy, -1)
self.assertEqual(moved.timestamp, t_delta)
self.assertAlmostEqual(moved[0].x, t_delta)
self.assertAlmostEqual(moved[0].y, -t_delta)
self.assertAlmostEqual(moved[0].vx, 1)
self.assertAlmostEqual(moved[0].vy, -1)
def test_gravitation(self):
"""Test that gravitational acceleration at t=0 is as expected."""
with PhysicsEngine('tests/massive-objects.json') as physics_engine:
# In this case, the first entity is very massive and the second
# entity should gravitate towards the first entity.
initial = physics_engine.get_state(0)
moved = physics_engine.get_state(1)
# https://www.wolframalpha.com/input/?i=1e30+kg+*+G+%2F+(1e8+m)%5E2
# According to the above, this should be somewhere between 6500 and
# 7000 m/s after one second.
self.assertTrue(moved[1].vx > 5000,
msg=f'vx is actually {moved[1].vx}')
            # Test that the internal derivative function is doing
            # the right calculations. Break out your SPH4U physics equations!!
y0 = initial
# Note that dy.X is actually the velocity at 0,
# and dy.VX is acceleration.
dy = PhysicsState(
ode_solver.simulation_differential_function(
0, y0.y0(), y0._proto_state, physics_engine.M, physics_engine._artificials),
y0._proto_state)
self.assertEqual(len(dy.X), 2)
self.assertAlmostEqual(dy.X[0], y0.VX[0])
self.assertAlmostEqual(dy.Y[0], y0.VY[0])
self.assertEqual(round(abs(dy.VX[0])),
round(common.G * initial[1].mass /
(y0.X[0] - y0.X[1]) ** 2))
self.assertAlmostEqual(dy.VY[0], 0)
self.assertAlmostEqual(dy.X[1], y0.VX[1])
self.assertAlmostEqual(dy.Y[1], y0.VY[1])
self.assertEqual(round(abs(dy.VX[1])),
round(common.G * initial[0].mass /
(y0.X[1] - y0.X[0]) ** 2))
self.assertAlmostEqual(dy.VY[1], 0)
def test_engines(self):
"""Test that engines use fuel and accelerate at the expected."""
with PhysicsEngine('tests/habitat.json') as physics_engine:
# In this test case, there is a single entity that has 300 kg fuel.
# heading, velocity, and position are all 0.
throttle = 1
t_delta = 5
physics_engine.handle_requests([
network.Request(
ident=network.Request.HAB_THROTTLE_SET,
throttle_set=throttle)],
requested_t=0)
initial = physics_engine.get_state(0)
moved = physics_engine.get_state(t_delta)
self.assertAlmostEqual(initial[0].heading, 0)
self.assertAlmostEqual(
moved[0].fuel,
(initial[0].fuel -
t_delta * throttle *
common.craft_capabilities[HABITAT].fuel_cons))
self.assertTrue(
moved[0].vx <
(t_delta * calc.engine_acceleration(moved)))
t_no_fuel = (initial[0].fuel
/ (throttle *
common.craft_capabilities[HABITAT].fuel_cons
)
)
empty_fuel = physics_engine.get_state(t_no_fuel)
after_empty_fuel = physics_engine.get_state(t_no_fuel + t_delta)
self.assertEqual(round(empty_fuel[0].fuel), 0)
self.assertEqual(round(after_empty_fuel[0].vx),
round(empty_fuel[0].vx))
def test_srbs(self):
"""Test that SRBs move the craft, and run out of fuel."""
with PhysicsEngine('tests/habitat.json') as physics_engine:
t_delta = 5
physics_engine.handle_requests(
[network.Request(ident=network.Request.IGNITE_SRBS)],
requested_t=0)
initial = physics_engine.get_state(0)
moved = physics_engine.get_state(t_delta)
self.assertAlmostEqual(initial[0].heading, 0)
self.assertAlmostEqual(initial[0].vx, 0)
self.assertAlmostEqual(moved[0].vx,
t_delta * calc.engine_acceleration(moved))
srb_empty = physics_engine.get_state(common.SRB_BURNTIME)
after_srb_empty = physics_engine.get_state(common.SRB_BURNTIME + 5)
self.assertAlmostEqual(srb_empty[0].vx, after_srb_empty[0].vx)
def test_three_body(self):
"""Test gravitational acceleration between three bodies is expected."""
with PhysicsEngine('tests/three-body.json') as physics_engine:
# In this case, three entities form a 90-45-45 triangle, with the
# entity at the right angle being about as massive as the sun.
# The first entity is the massive entity, the second is far to the
# left, and the third is far to the top.
physics_state = physics_engine.get_state(0)
# Test that every single entity has the correct accelerations.
y0 = physics_state
dy = PhysicsState(
ode_solver.simulation_differential_function(
0, y0.y0(), y0._proto_state, physics_engine.M, physics_engine._artificials),
physics_state._proto_state)
self.assertEqual(len(dy.X), 3)
self.assertAlmostEqual(dy.X[0], y0.VX[0])
self.assertAlmostEqual(dy.Y[0], y0.VY[0])
self.assertEqual(round(abs(dy.VX[0])),
round(common.G * physics_state[1].mass /
(y0.X[0] - y0.X[1]) ** 2))
self.assertEqual(round(abs(dy.VY[0])),
round(common.G * physics_state[2].mass /
(y0.Y[0] - y0.Y[2]) ** 2))
self.assertAlmostEqual(dy.X[1], y0.VX[1])
self.assertAlmostEqual(dy.Y[1], y0.VY[1])
self.assertEqual(round(abs(dy.VX[1])),
round(common.G * physics_state[0].mass /
(y0.X[1] - y0.X[0]) ** 2 +
np.sqrt(2) * common.G *
physics_state[2].mass /
(y0.X[1] - y0.X[2]) ** 2
))
self.assertEqual(round(abs(dy.VY[1])),
round(np.sqrt(2) * common.G *
physics_state[2].mass /
(y0.X[1] - y0.X[2]) ** 2))
self.assertAlmostEqual(dy.X[2], y0.VX[2])
self.assertAlmostEqual(dy.Y[2], y0.VY[2])
self.assertEqual(round(abs(dy.VX[2])),
round(np.sqrt(2) * common.G *
physics_state[2].mass /
(y0.X[1] - y0.X[2]) ** 2))
self.assertEqual(round(abs(dy.VY[2])),
round(
common.G * physics_state[0].mass /
(y0.Y[2] - y0.Y[0]) ** 2 +
np.sqrt(2) * common.G * physics_state[1].mass
/ (y0.Y[2] - y0.Y[1]) ** 2
))
def test_landing(self):
with PhysicsEngine('tests/artificial-collision.json') \
as physics_engine:
# This case is the same as simple-collision, but the first entity
# has the artificial flag set. Thus it should land and stick.
# As in simple-collision, the collision happens at about t = 42.
before = physics_engine.get_state(40)
after = physics_engine.get_state(50)
assert before[0].artificial
assert not before[2].artificial
self.assertTrue(before[0].x > before[2].x)
self.assertTrue(before[2].vx > 0)
self.assertAlmostEqual(after[0].vx, after[2].vx)
self.assertAlmostEqual(after[0].x,
(after[2].x +
after[0].r +
after[2].r))
def test_longterm_stable_landing(self):
"""Test that landed ships have stable altitude in the long term."""
savestate = common.load_savefile(common.savefile('OCESS.json'))
initial_t = savestate.timestamp
with PhysicsEngine('OCESS.json') as physics_engine:
initial = physics_engine.get_state(initial_t + 10)
physics_engine.handle_requests(
[network.Request(ident=network.Request.TIME_ACC_SET,
time_acc_set=common.TIME_ACCS[-1].value)],
requested_t=initial_t + 10)
final = physics_engine.get_state(initial_t + 100_000)
self.assertAlmostEqual(
calc.fastnorm(initial['Earth'].pos - initial['Habitat'].pos),
initial['Earth'].r + initial['Habitat'].r,
delta=1)
self.assertAlmostEqual(
calc.fastnorm(final['Earth'].pos - final['Habitat'].pos),
final['Earth'].r + final['Habitat'].r,
delta=1)
def test_drag(self):
"""Test that drag is small but noticeable during unpowered flight."""
atmosphere_save = common.load_savefile(common.savefile(
'tests/atmosphere.json'))
# The habitat starts 1 km in the air, the same speed as the Earth.
hab = atmosphere_save.craft_entity()
hab.vy += 10
atmosphere_save[atmosphere_save.craft] = hab
drag = calc.fastnorm(calc.drag(atmosphere_save))
self.assertLess(59, drag)
self.assertGreater(60, drag)
class EntityTestCase(unittest.TestCase):
"""Tests that state.Entity properly proxies underlying proto."""
def test_fields(self):
def test_field(pe: Entity, field: str, val):
pe.proto.Clear()
setattr(pe, field, val)
self.assertEqual(getattr(pe.proto, field), val)
pe = Entity(protos.Entity())
test_field(pe, 'name', 'test')
test_field(pe, 'x', 5)
test_field(pe, 'y', 5)
test_field(pe, 'vx', 5)
test_field(pe, 'vy', 5)
test_field(pe, 'r', 5)
test_field(pe, 'mass', 5)
test_field(pe, 'heading', 5)
test_field(pe, 'spin', 5)
test_field(pe, 'fuel', 5)
test_field(pe, 'throttle', 5)
test_field(pe, 'landed_on', 'other_test')
test_field(pe, 'broken', True)
test_field(pe, 'artificial', True)
class PhysicsStateTestCase(unittest.TestCase):
"""Tests state.PhysicsState accessors and setters."""
proto_state = protos.PhysicalState(
timestamp=5,
entities=[
protos.Entity(
name='First', mass=100, r=200,
x=10, y=20, vx=30, vy=40, heading=7, spin=50, fuel=60,
throttle=70),
protos.Entity(
name='Second', mass=101, r=201, artificial=True,
x=11, y=21, vx=31, vy=41, heading=2, spin=51, fuel=61,
throttle=71, landed_on='First', broken=True)
],
engineering=protos.EngineeringState(
components=[protos.EngineeringState.Component()] * _N_COMPONENTS,
coolant_loops=[protos.EngineeringState.CoolantLoop()] * _N_COOLANT_LOOPS,
radiators=[protos.EngineeringState.Radiator()] * _N_RADIATORS
)
)
def test_landed_on(self):
"""Test that the special .landed_on field is properly set."""
ps = PhysicsState(None, self.proto_state)
self.assertEqual(ps['First'].landed_on, '')
self.assertEqual(ps['Second'].landed_on, 'First')
def test_y_vector_init(self):
"""Test that initializing with a y-vector uses y-vector values."""
y0 = np.concatenate((np.array([
10, 20, # x
30, 40, # y
50, 60, # vx
0, 0, # vy
0, 0, # heading
70, 80, # spin
90, 100, # fuel
0, 0, # throttle
1, -1, # only First is landed on Second
0, 1, # Second is broken
common.SRB_EMPTY,
1 # time_acc
]),
np.zeros(EngineeringState.N_ENGINEERING_FIELDS)
))
ps = PhysicsState(y0, self.proto_state)
self.assertTrue(np.array_equal(ps.y0(), y0.astype(ps.y0().dtype)))
self.assertEqual(ps['First'].landed_on, 'Second')
proto_state = ps.as_proto()
proto_state.timestamp = 50
self.assertEqual(proto_state.timestamp, 50)
self.assertEqual(proto_state.entities[0].fuel, 90)
self.assertTrue(proto_state.entities[1].broken)
def test_get_set(self):
"""Test __getitem__ and __setitem__."""
ps = PhysicsState(None, self.proto_state)
entity = ps[0]
entity.landed_on = 'Second'
ps[0] = entity
self.assertEqual(ps[0].landed_on, 'Second')
def test_entity_view(self):
"""Test that setting and getting _EntityView attrs propagate."""
ps = PhysicsState(None, self.proto_state)
self.assertEqual(ps[0].name, 'First')
entity = ps[0]
self.assertTrue(isinstance(entity, _EntityView))
self.assertEqual(entity.x, 10)
self.assertEqual(entity.y, 20)
self.assertEqual(entity.vx, 30)
self.assertEqual(entity.vy, 40)
self.assertEqual(entity.spin, 50)
self.assertEqual(entity.fuel, 60)
self.assertEqual(entity.landed_on, '')
self.assertEqual(entity.throttle, 70)
ps.y0()
self.assertEqual(entity.heading, 7 % (2 * np.pi))
ps[0].landed_on = 'Second'
self.assertEqual(entity.landed_on, 'Second')
entity.x = 500
self.assertEqual(ps[0].x, 500)
entity.pos = np.array([55, 66])
self.assertEqual(ps['First'].x, 55)
self.assertEqual(ps['First'].y, 66)
class CalculationsTestCase(unittest.TestCase):
"""Tests instantaneous orbit parameter calculations.
The file tests/gui-test.json encodes the position of the Earth and the
    ISS, with all positions offset by a billion metres along the x and y axes.
https://www.wolframalpha.com/input/?i=International+Space+Station
describes the orbital parameters of the ISS, all numbers in this test are
taken from that page."""
def test_elliptical_orbital_parameters(self):
# Again, see
# https://www.wolframalpha.com/input/?i=International+Space+Station
# For these expected values
physics_state = common.load_savefile(common.savefile(
'tests/gui-test.json'))
iss = physics_state[0]
earth = physics_state[1]
# The semiaxes are relatively close to expected.
self.assertAlmostEqual(
calc.semimajor_axis(iss, earth), 6785e3, delta=0.01 * earth.r)
# The eccentricity is within 1e-6 of the expected.
self.assertAlmostEqual(
calc.fastnorm(calc.eccentricity(iss, earth)),
5.893e-4, delta=1e-3)
# The apoapsis is relatively close to expected.
self.assertAlmostEqual(
calc.apoapsis(iss, earth), 418.3e3, delta=0.01 * earth.r)
# The periapsis is relatively close to expected.
self.assertAlmostEqual(
calc.periapsis(iss, earth), 410.3e3, delta=0.01 * earth.r)
def test_hyperbolic_orbital_parameters(self):
# Unlike the elliptical test, this tests our favourite extra-solar
# visitor to make sure we can calculate Keplerian orbital
# characteristics from its orbital state vectors! That's right, we're
# talking about Sedna! The expected values are arrived at through
# calculation, and also
# http://orbitsimulator.com/formulas/OrbitalElements.html
physics_state = common.load_savefile(common.savefile(
'tests/sedna.json'))
sun = physics_state[0]
oumuamua = physics_state[1]
expected_semimajor_axis = -71231070.14146987
self.assertAlmostEqual(
calc.semimajor_axis(oumuamua, sun), expected_semimajor_axis,
delta=abs(0.01 * expected_semimajor_axis))
expected_eccentricity = 1644.477
self.assertAlmostEqual(
calc.fastnorm(calc.eccentricity(oumuamua, sun)),
expected_eccentricity, delta=0.01 * expected_eccentricity)
expected_periapsis = 1.1714e11 # Through calculation
self.assertAlmostEqual(
calc.periapsis(sun, oumuamua) + oumuamua.r, expected_periapsis,
delta=0.01 * 78989185420.15271)
def test_speeds(self):
physics_state = common.load_savefile(common.savefile(
'tests/gui-test.json'))
iss = physics_state[0]
earth = physics_state[1]
self.assertAlmostEqual(calc.h_speed(iss, earth), 7665, delta=10)
self.assertAlmostEqual(calc.v_speed(iss, earth), -0.1, delta=0.1)
class EngineeringViewTestCase(unittest.TestCase):
"""Test that the various accessors of EngineeringState are correct."""
def test_component_accessors(self):
with PhysicsEngine('tests/engineering-test.json') as physics_engine:
engineering = physics_engine.get_state().engineering
# Test getters work
self.assertEqual(engineering.components[0].connected, True)
self.assertAlmostEqual(engineering.components[0].temperature, 31.3)
self.assertAlmostEqual(engineering.components[0].resistance, 11.0)
self.assertAlmostEqual(engineering.components[0].voltage, 120.0)
self.assertAlmostEqual(engineering.components[0].current, 0.2)
self.assertEqual(engineering.components[0].coolant_connection,
ComponentCoolantCnxn.HAB_ONE)
self.assertAlmostEqual(engineering.components[0].get_coolant_loops()[0].coolant_temp, 15.0)
# Test setters work
engineering.components[1].connected = True
engineering.components[1].temperature = 12.3
engineering.components[1].resistance = 4.56
engineering.components[1].voltage = 7.89
engineering.components[1].current = 0.1
engineering.components[1].coolant_connection = ComponentCoolantCnxn.HAB_TWO
self.assertEqual(engineering.components[1].connected, True)
self.assertAlmostEqual(engineering.components[1].temperature, 12.3)
self.assertAlmostEqual(engineering.components[1].resistance, 4.56)
self.assertAlmostEqual(engineering.components[1].voltage, 7.89)
self.assertAlmostEqual(engineering.components[1].current, 0.1)
self.assertEqual(engineering.components[1].coolant_connection,
ComponentCoolantCnxn.HAB_TWO)
self.assertAlmostEqual(engineering.components[1].get_coolant_loops()[0].coolant_temp, 20.0)
def test_as_proto(self):
with PhysicsEngine('tests/engineering-test.json') as physics_engine:
state = physics_engine.get_state()
engineering = state.engineering
# Change some data
engineering.components[1].connected = True
engineering.components[1].temperature = 12.3
engineering.components[1].resistance = 4.56
engineering.components[1].voltage = 7.89
engineering.components[1].current = 0.1
# Check engineering proto
eng_proto = engineering.as_proto()
self.assertEqual(eng_proto.components[1].connected, True)
self.assertAlmostEqual(eng_proto.components[1].temperature, 12.3)
self.assertAlmostEqual(eng_proto.components[1].resistance, 4.56)
self.assertAlmostEqual(eng_proto.components[1].voltage, 7.89)
self.assertAlmostEqual(eng_proto.components[1].current, 0.1)
# Check physicsstate proto
physics_state_proto = state.as_proto()
self.assertEqual(physics_state_proto.engineering.components[1].connected, True)
self.assertAlmostEqual(physics_state_proto.engineering.components[1].temperature, 12.3)
self.assertAlmostEqual(physics_state_proto.engineering.components[1].resistance, 4.56)
self.assertAlmostEqual(physics_state_proto.engineering.components[1].voltage, 7.89)
self.assertAlmostEqual(physics_state_proto.engineering.components[1].current, 0.1)
def test_coolant_accessors(self):
with PhysicsEngine('tests/engineering-test.json') as physics_engine:
engineering = physics_engine.get_state().engineering
# Test getters work
self.assertAlmostEqual(engineering.coolant_loops[0].coolant_temp, 15.0)
self.assertEqual(engineering.coolant_loops[0].primary_pump_on, True)
self.assertEqual(engineering.coolant_loops[0].secondary_pump_on, True)
# Test setters work
engineering.coolant_loops[1].coolant_temp = 33.3
engineering.coolant_loops[1].primary_pump_on = False
engineering.coolant_loops[1].secondary_pump_on = True
self.assertAlmostEqual(engineering.coolant_loops[1].coolant_temp, 33.3)
self.assertEqual(engineering.coolant_loops[1].primary_pump_on, False)
self.assertEqual(engineering.coolant_loops[1].secondary_pump_on, True)
def test_radiator_accessors(self):
with PhysicsEngine('tests/engineering-test.json') as physics_engine:
engineering = physics_engine.get_state().engineering
# Test getters work
self.assertEqual(engineering.radiators[0].attached_to_coolant_loop, 1)
self.assertEqual(engineering.radiators[0].functioning, True)
self.assertEqual(engineering.radiators[0].get_coolant_loop().coolant_temp, 15)
# Test setters work
engineering.radiators[1].attached_to_coolant_loop = 2
engineering.radiators[1].functioning = False
self.assertEqual(engineering.radiators[1].attached_to_coolant_loop, 2)
self.assertEqual(engineering.radiators[1].functioning, False)
self.assertEqual(engineering.radiators[1].get_coolant_loop().coolant_temp, 20)
def test_numpy_arrays_not_copied(self):
"""Test that the internal array representation of EngineeringState is
just a view into PhysicsState._array_rep, otherwise EngineeringState will
write new data into the ether and it won't update PhysicsState.y0()."""
with PhysicsEngine('tests/engineering-test.json') as physics_engine:
state = physics_engine.get_state()
engineering = state.engineering
engineering.components[0].voltage = 777777.7
self.assertEqual(engineering._array[3], 777777.7)
self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 3], 777777.7)
def test_alarms(self):
with PhysicsEngine('tests/engineering-test.json') as physics_engine:
engineering = physics_engine.get_state().engineering
self.assertEqual(engineering.master_alarm, False)
self.assertEqual(engineering.radiation_alarm, False)
self.assertEqual(engineering.asteroid_alarm, False)
self.assertEqual(engineering.hab_reactor_alarm, False)
self.assertEqual(engineering.ayse_reactor_alarm, False)
self.assertEqual(engineering.hab_gnomes, False)
engineering.master_alarm = True
engineering.radiation_alarm = True
engineering.asteroid_alarm = True
engineering.hab_reactor_alarm = True
engineering.ayse_reactor_alarm = True
engineering.hab_gnomes = True
self.assertEqual(engineering.master_alarm, True)
self.assertEqual(engineering.radiation_alarm, True)
self.assertEqual(engineering.asteroid_alarm, True)
self.assertEqual(engineering.hab_reactor_alarm, True)
self.assertEqual(engineering.ayse_reactor_alarm, True)
self.assertEqual(engineering.hab_gnomes, True)
def test_misc_accessors(self):
with PhysicsEngine('tests/engineering-test.json') as physics_engine:
physics_state = physics_engine.get_state()
self.assertAlmostEqual(physics_state.engineering.habitat_fuel, 100)
physics_state[HABITAT].fuel = 50.0
self.assertAlmostEqual(physics_state.engineering.habitat_fuel, 50)
def test_performance():
# This just runs for 10 seconds and collects profiling data.
import time
with PhysicsEngine('OCESS.json') as physics_engine:
physics_engine.handle_requests([
network.Request(ident=network.Request.TIME_ACC_SET,
time_acc_set=common.TIME_ACCS[-2].value)])
end_time = time.time() + 10
print(f"Profiling performance for {end_time - time.time()} seconds.")
common.start_profiling()
while time.time() < end_time:
time.sleep(0.05)
physics_engine.get_state()
if __name__ == '__main__':
logs.make_program_logfile('test')
if '-v' in sys.argv:
logs.enable_verbose_logging()
if 'profile' in sys.argv:
test_performance()
else:
unittest.main()
|
py | b40aa75ba25857ab57b2c2502fe1813089a65a51 | import os, sys
# example: python train_run.py keyword temp_keyword _
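# Positional arguments read below (an inferred summary of the code, not from the
# original author): mode, control_mode, use_prefixtuning ('yes'/'no'),
# MODEL_FILE, MODEL_FILE_second, split_file.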
if __name__ == '__main__':
mode = sys.argv[1]
control_mode = sys.argv[2]
use_prefixtuning = (sys.argv[3] == 'yes')
model_file = None
old_model = None
MODEL_FILE = sys.argv[4]
MODEL_FILE_second = sys.argv[5]
split_file = sys.argv[6]
if mode == 'embMatch' and not use_prefixtuning:
MODEL_FILE="/u/scr/xlisali/contrast_LM/transformers/examples/language-modeling/temp_medium_matching_cleanbert"
Token_FILE="/u/scr/xlisali/contrast_LM/transformers/examples/language-modeling/temp_medium_matching_cleanbert"
SENT_FILE = '/u/scr/xlisali/contrast_LM/data_api/dataset/matching_train_small.txt'
SENT_FILE='/u/scr/xlisali/contrast_LM/data_api/dataset/matching_dev_small.txt'
if model_file:
Model_FILE = model_file
else:
Model_FILE="medium_matching"
if old_model != '_':
OLD_MODEL=old_model
else:
OLD_MODEL="gpt2-medium"
if mode == 'keyword' and not use_prefixtuning:
if control_mode == 'no':
MODEL_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/language-modeling/temp_keyword"
Token_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/language-modeling/temp_keyword"
else:
# MODEL_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/keyword_temp"
# Token_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/keyword_temp"
# mid
MODEL_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/keyword_temp2/checkpoint-90000"
Token_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/language-modeling/temp_keyword"
#
# MODEL_FILE = "gpt2-medium"
# Token_FILE = "gpt2-medium"
TRAIN_FILE='/u/scr/xlisali/contrast_LM/data_api/dataset/matching_train_small.txt'
TEST_FILE='/u/scr/xlisali/contrast_LM/data_api/dataset/matching_dev_small.txt'
if model_file:
Model_FILE = model_file
else:
Model_FILE="medium_matching"
if old_model != '_':
OLD_MODEL=old_model
else:
OLD_MODEL="gpt2-medium"
elif mode == 'topic':
if control_mode == 'no':
pass
# MODEL_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/language-modeling/temp_topic"
# Token_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/language-modeling/temp_topic"
else:
# MODEL_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/topic_temp"
# Token_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/topic_temp"
# MODEL_FILE = "gpt2-medium"
# Token_FILE = "gpt2-medium"
# mid
# MODEL_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/topic_temp2"
# Token_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/topic_temp2"
MODEL_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/topicprefixtune"
Token_FILE = "/u/scr/xlisali/contrast_LM/transformers/examples/control/topicprefixtune"
TRAIN_FILE = None
TEST_FILE = None
if 'finetune' in MODEL_FILE:
tuning_mode = 'finetune'
app = ''
elif 'prefixtune' in MODEL_FILE:
tuning_mode = 'prefixtune'
# if 'inputpara' not in MODEL_FILE and "-emb-" not in MODEL_FILE:
# app = '--optim_prefix {} --preseqlen {}'.format('yes', 10)
# else:
app = '--optim_prefix {} --preseqlen {} '.format('no', 10)
if "-emb" in MODEL_FILE:
app += "--prefix_mode embedding "
MODEL_FILE2 = MODEL_FILE
MODEL_FILE = 'gpt2-medium'
elif mode =='data2text':
Token_FILE = MODEL_FILE
if 'finetune' in MODEL_FILE:
tuning_mode = 'finetune'
app = ''
elif 'prefixtune' in MODEL_FILE:
tuning_mode = 'prefixtune'
if "_y_20" in MODEL_FILE:
app = '--optim_prefix {} --preseqlen {} '.format('yes', 20)
else:
app = '--optim_prefix {} --preseqlen {} '.format('no', 20)
if "_emb" in MODEL_FILE:
app += "--prefix_mode embedding "
elif "_act" in MODEL_FILE:
app += "--prefix_mode activation "
if "_inf" in MODEL_FILE or 'infix' in MODEL_FILE:
app += " --format_mode infix "
elif "_cat" in MODEL_FILE:
app += " --format_mode cat "
elif "_pee" in MODEL_FILE:
app += " --format_mode peek "
MODEL_FILE2 = MODEL_FILE
MODEL_FILE2_second = MODEL_FILE_second
MODEL_FILE = 'gpt2-medium'
elif mode == 'triples':
Token_FILE = MODEL_FILE
if 'finetune' in MODEL_FILE:
tuning_mode = 'finetune'
app = ''
elif 'prefixtune' in MODEL_FILE:
tuning_mode = 'prefixtune'
if "tune_y_" in MODEL_FILE:
app = '--optim_prefix {} --preseqlen {} '.format('yes', 20)
else:
app = '--optim_prefix {} --preseqlen {} '.format('no', 20)
if "_emb" in MODEL_FILE:
app += "--prefix_mode embedding "
elif "_act" in MODEL_FILE:
app += "--prefix_mode activation "
if "_inf" in MODEL_FILE or 'infix' in MODEL_FILE:
app += " --format_mode infix "
elif "_cat" in MODEL_FILE:
app += " --format_mode cat "
elif "_pee" in MODEL_FILE:
app += " --format_mode peek "
MODEL_FILE2 = MODEL_FILE
MODEL_FILE = 'gpt2-medium'
elif mode == 'webnlg':
Token_FILE = MODEL_FILE
if 'finetune' in MODEL_FILE:
tuning_mode = 'finetune'
app = ''
elif 'prefixtune' in MODEL_FILE:
tuning_mode = 'prefixtune'
if "tune_y_" in MODEL_FILE:
app = '--optim_prefix {} --preseqlen {} '.format('yes', 20)
else:
app = '--optim_prefix {} --preseqlen {} '.format('no', 20)
if "_emb" in MODEL_FILE:
app += "--prefix_mode embedding "
elif "_act" in MODEL_FILE:
app += "--prefix_mode activation "
if "_inf" in MODEL_FILE or 'infix' in MODEL_FILE:
app += " --format_mode infix "
elif "_cat" in MODEL_FILE:
app += " --format_mode cat "
elif "_pee" in MODEL_FILE:
app += " --format_mode peek "
MODEL_FILE2 = MODEL_FILE
MODEL_FILE = 'gpt2-medium'
COMMANDLINE = "python run_compose.py \
--model_type=gpt2 \
--length 100 \
--model_name_or_path={} \
--num_return_sequences 5 \
--stop_token [EOS] \
--tokenizer_name={} \
--task_mode={} \
--control_mode={} --tuning_mode {} --split_file {}\
".format(MODEL_FILE, Token_FILE, mode, control_mode, tuning_mode, split_file)
COMMANDLINE += app
if tuning_mode == 'prefixtune':
COMMANDLINE += ' --prefixModel_name_or_path {} --prefixModel_name_or_path2 {} '.format(MODEL_FILE2, MODEL_FILE2_second)
else:
COMMANDLINE += ' --model_name_or_path2 {} '.format(MODEL_FILE_second)
os.system(COMMANDLINE)
# name = os.path.basename(MODEL_FILE2)
# name = 'e2e_results_new/{}'.format(name)
# full_command = "nlprun -a lisa-base-torch -g 1 -n {} -x jagupard4,jagupard5,jagupard6,jagupard7,jagupard8 \'{}\'".format(name,COMMANDLINE)
# print(full_command)
# os.system(full_command)
|
py | b40aa96308f2f4cecda8099b92990d3d52acf195 | # Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interceptors implementation of gRPC Asyncio Python."""
import asyncio
import collections
import functools
from abc import ABCMeta, abstractmethod
from typing import Callable, Optional, Iterator, Sequence, Union, Awaitable, AsyncIterable
import grpc
from grpc._cython import cygrpc
from . import _base_call
from ._call import UnaryUnaryCall, UnaryStreamCall, StreamUnaryCall, StreamStreamCall, AioRpcError
from ._call import _RPC_ALREADY_FINISHED_DETAILS, _RPC_HALF_CLOSED_DETAILS
from ._call import _API_STYLE_ERROR
from ._utils import _timeout_to_deadline
from ._typing import (RequestType, SerializingFunction, DeserializingFunction,
ResponseType, DoneCallbackType, RequestIterableType,
ResponseIterableType)
from ._metadata import Metadata
_LOCAL_CANCELLATION_DETAILS = 'Locally cancelled by application!'
class ServerInterceptor(metaclass=ABCMeta):
"""Affords intercepting incoming RPCs on the service-side.
This is an EXPERIMENTAL API.
"""
@abstractmethod
async def intercept_service(
self, continuation: Callable[[grpc.HandlerCallDetails],
Awaitable[grpc.RpcMethodHandler]],
handler_call_details: grpc.HandlerCallDetails
) -> grpc.RpcMethodHandler:
"""Intercepts incoming RPCs before handing them over to a handler.
Args:
continuation: A function that takes a HandlerCallDetails and
proceeds to invoke the next interceptor in the chain, if any,
or the RPC handler lookup logic, with the call details passed
as an argument, and returns an RpcMethodHandler instance if
the RPC is considered serviced, or None otherwise.
handler_call_details: A HandlerCallDetails describing the RPC.
Returns:
An RpcMethodHandler with which the RPC may be serviced if the
interceptor chooses to service this RPC, or None otherwise.
"""
class ClientCallDetails(
collections.namedtuple(
'ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials', 'wait_for_ready')),
grpc.ClientCallDetails):
"""Describes an RPC to be invoked.
This is an EXPERIMENTAL API.
Args:
method: The method name of the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional metadata to be transmitted to the service-side of
the RPC.
credentials: An optional CallCredentials for the RPC.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable :term:`wait_for_ready` mechanism.
"""
method: str
timeout: Optional[float]
metadata: Optional[Metadata]
credentials: Optional[grpc.CallCredentials]
wait_for_ready: Optional[bool]
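# Interceptors typically construct a new ClientCallDetails rather than mutating
# the one they receive; an illustrative sketch (variable names assumed):
#
#     new_details = ClientCallDetails(
#         client_call_details.method, client_call_details.timeout,
#         new_metadata, client_call_details.credentials,
#         client_call_details.wait_for_ready)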
class ClientInterceptor(metaclass=ABCMeta):
"""Base class used for all Aio Client Interceptor classes"""
class UnaryUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting unary-unary invocations."""
@abstractmethod
async def intercept_unary_unary(
self, continuation: Callable[[ClientCallDetails, RequestType],
UnaryUnaryCall],
client_call_details: ClientCallDetails,
request: RequestType) -> Union[UnaryUnaryCall, ResponseType]:
"""Intercepts a unary-unary invocation asynchronously.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request: The request value for the RPC.
Returns:
An object with the RPC response.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class UnaryStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting unary-stream invocations."""
@abstractmethod
async def intercept_unary_stream(
self, continuation: Callable[[ClientCallDetails, RequestType],
UnaryStreamCall],
client_call_details: ClientCallDetails, request: RequestType
) -> Union[ResponseIterableType, UnaryStreamCall]:
"""Intercepts a unary-stream invocation asynchronously.
        The function could return the call object or an asynchronous
        iterator; if an asynchronous iterator is returned, it will
        become the source of the reads done by the caller.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request: The request value for the RPC.
Returns:
The RPC Call or an asynchronous iterator.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class StreamUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting stream-unary invocations."""
@abstractmethod
async def intercept_stream_unary(
self,
continuation: Callable[[ClientCallDetails, RequestType],
StreamUnaryCall],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType,
) -> StreamUnaryCall:
"""Intercepts a stream-unary invocation asynchronously.
        Within the interceptor, the usage of the call methods like `write` or
        even awaiting the call should be done carefully, since the caller
        could be expecting an untouched call, for example to start writing
        messages to it.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request_iterator)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request_iterator: The request iterator that will produce requests
for the RPC.
Returns:
The RPC Call.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class StreamStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting stream-stream invocations."""
@abstractmethod
async def intercept_stream_stream(
self,
continuation: Callable[[ClientCallDetails, RequestType],
StreamStreamCall],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType,
) -> Union[ResponseIterableType, StreamStreamCall]:
"""Intercepts a stream-stream invocation asynchronously.
        Within the interceptor, the usage of the call methods like `write` or
        even awaiting the call should be done carefully, since the caller
        could be expecting an untouched call, for example to start writing
        messages to it.
        The function could return the call object or an asynchronous
        iterator; if an asynchronous iterator is returned, it will
        become the source of the reads done by the caller.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request_iterator)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request_iterator: The request iterator that will produce requests
for the RPC.
Returns:
The RPC Call or an asynchronous iterator.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class InterceptedCall:
"""Base implementation for all intercepted call arities.
    Interceptors might have some work to do before the RPC invocation, with
    the ability to change the invocation parameters, and some work to do
    after the RPC invocation, with access to the wrapped
    `UnaryUnaryCall`.
    It also handles early and late cancellations: when the RPC has not even
    started and execution is still held by the interceptors, or when the
    RPC has finished but execution is again still held by the interceptors.
    Once the RPC is finally executed, all methods are forwarded to the
    intercepted call, which is at the same time the call returned to the
    interceptors.
    As the base class for all intercepted call arities, it implements the
    logic around final status, metadata and cancellation.
"""
_interceptors_task: asyncio.Task
_pending_add_done_callbacks: Sequence[DoneCallbackType]
def __init__(self, interceptors_task: asyncio.Task) -> None:
self._interceptors_task = interceptors_task
self._pending_add_done_callbacks = []
self._interceptors_task.add_done_callback(
self._fire_or_add_pending_done_callbacks)
def __del__(self):
self.cancel()
def _fire_or_add_pending_done_callbacks(
self, interceptors_task: asyncio.Task) -> None:
if not self._pending_add_done_callbacks:
return
call_completed = False
try:
call = interceptors_task.result()
if call.done():
call_completed = True
except (AioRpcError, asyncio.CancelledError):
call_completed = True
if call_completed:
for callback in self._pending_add_done_callbacks:
callback(self)
else:
for callback in self._pending_add_done_callbacks:
callback = functools.partial(self._wrap_add_done_callback,
callback)
call.add_done_callback(callback)
self._pending_add_done_callbacks = []
def _wrap_add_done_callback(self, callback: DoneCallbackType,
unused_call: _base_call.Call) -> None:
callback(self)
def cancel(self) -> bool:
if not self._interceptors_task.done():
# There is no yet the intercepted call available,
# Trying to cancel it by using the generic Asyncio
# cancellation method.
return self._interceptors_task.cancel()
try:
call = self._interceptors_task.result()
except AioRpcError:
return False
except asyncio.CancelledError:
return False
return call.cancel()
def cancelled(self) -> bool:
if not self._interceptors_task.done():
return False
try:
call = self._interceptors_task.result()
except AioRpcError as err:
return err.code() == grpc.StatusCode.CANCELLED
except asyncio.CancelledError:
return True
return call.cancelled()
def done(self) -> bool:
if not self._interceptors_task.done():
return False
try:
call = self._interceptors_task.result()
except (AioRpcError, asyncio.CancelledError):
return True
return call.done()
def add_done_callback(self, callback: DoneCallbackType) -> None:
if not self._interceptors_task.done():
self._pending_add_done_callbacks.append(callback)
return
try:
call = self._interceptors_task.result()
except (AioRpcError, asyncio.CancelledError):
callback(self)
return
if call.done():
callback(self)
else:
callback = functools.partial(self._wrap_add_done_callback, callback)
call.add_done_callback(callback)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
async def initial_metadata(self) -> Optional[Metadata]:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.initial_metadata()
except asyncio.CancelledError:
return None
return await call.initial_metadata()
async def trailing_metadata(self) -> Optional[Metadata]:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.trailing_metadata()
except asyncio.CancelledError:
return None
return await call.trailing_metadata()
async def code(self) -> grpc.StatusCode:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.code()
except asyncio.CancelledError:
return grpc.StatusCode.CANCELLED
return await call.code()
async def details(self) -> str:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.details()
except asyncio.CancelledError:
return _LOCAL_CANCELLATION_DETAILS
return await call.details()
async def debug_error_string(self) -> Optional[str]:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.debug_error_string()
except asyncio.CancelledError:
return ''
return await call.debug_error_string()
async def wait_for_connection(self) -> None:
call = await self._interceptors_task
return await call.wait_for_connection()
class _InterceptedUnaryResponseMixin:
def __await__(self):
call = yield from self._interceptors_task.__await__()
response = yield from call.__await__()
return response
class _InterceptedStreamResponseMixin:
_response_aiter: Optional[AsyncIterable[ResponseType]]
def _init_stream_response_mixin(self) -> None:
        # Initialized later; otherwise, if the iterator is never fully
        # consumed, a logging warning is emitted by asyncio.
self._response_aiter = None
async def _wait_for_interceptor_task_response_iterator(
self) -> ResponseType:
call = await self._interceptors_task
async for response in call:
yield response
def __aiter__(self) -> AsyncIterable[ResponseType]:
if self._response_aiter is None:
self._response_aiter = self._wait_for_interceptor_task_response_iterator(
)
return self._response_aiter
async def read(self) -> ResponseType:
if self._response_aiter is None:
self._response_aiter = self._wait_for_interceptor_task_response_iterator(
)
return await self._response_aiter.asend(None)
class _InterceptedStreamRequestMixin:
_write_to_iterator_async_gen: Optional[AsyncIterable[RequestType]]
_write_to_iterator_queue: Optional[asyncio.Queue]
    _FINISH_ITERATOR_SENTINEL = object()  # placed on the write queue to signal done_writing()
def _init_stream_request_mixin(
self, request_iterator: Optional[RequestIterableType]
) -> RequestIterableType:
if request_iterator is None:
            # We provide our own request iterator, which is a proxy
            # of the future writes that will be done by the caller.
self._write_to_iterator_queue = asyncio.Queue(maxsize=1)
self._write_to_iterator_async_gen = self._proxy_writes_as_request_iterator(
)
request_iterator = self._write_to_iterator_async_gen
else:
self._write_to_iterator_queue = None
return request_iterator
async def _proxy_writes_as_request_iterator(self):
await self._interceptors_task
while True:
value = await self._write_to_iterator_queue.get()
if value is _InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL:
break
yield value
async def write(self, request: RequestType) -> None:
        # If no queue was created it means that requests
        # should be expected through an iterator provided
        # by the caller.
if self._write_to_iterator_queue is None:
raise cygrpc.UsageError(_API_STYLE_ERROR)
try:
call = await self._interceptors_task
except (asyncio.CancelledError, AioRpcError):
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
if call.done():
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
elif call._done_writing_flag:
raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS)
        # The write might never complete since the call could abruptly finish;
        # we give up on the first awaitable object that finishes.
_, _ = await asyncio.wait(
(self._write_to_iterator_queue.put(request), call.code()),
return_when=asyncio.FIRST_COMPLETED)
if call.done():
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
async def done_writing(self) -> None:
"""Signal peer that client is done writing.
This method is idempotent.
"""
        # If no queue was created it means that requests
        # should be expected through an iterator provided
        # by the caller.
if self._write_to_iterator_queue is None:
raise cygrpc.UsageError(_API_STYLE_ERROR)
try:
call = await self._interceptors_task
except asyncio.CancelledError:
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
        # The write might never complete since the call could abruptly finish;
        # we give up on the first awaitable object that finishes.
_, _ = await asyncio.wait((self._write_to_iterator_queue.put(
_InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL),
call.code()),
return_when=asyncio.FIRST_COMPLETED)
class InterceptedUnaryUnaryCall(_InterceptedUnaryResponseMixin, InterceptedCall,
_base_call.UnaryUnaryCall):
"""Used for running a `UnaryUnaryCall` wrapped by interceptors.
    The `__await__` method is proxied to the intercepted call only once
    the interceptor task is finished.
"""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[UnaryUnaryClientInterceptor],
request: RequestType, timeout: Optional[float],
metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[UnaryUnaryClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], request: RequestType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> UnaryUnaryCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[UnaryUnaryClientInterceptor],
client_call_details: ClientCallDetails,
request: RequestType) -> _base_call.UnaryUnaryCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
call_or_response = await interceptor.intercept_unary_unary(
continuation, client_call_details, request)
if isinstance(call_or_response, _base_call.UnaryUnaryCall):
return call_or_response
else:
return UnaryUnaryCallResponse(call_or_response)
else:
return UnaryUnaryCall(
request, _timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class InterceptedUnaryStreamCall(_InterceptedStreamResponseMixin,
InterceptedCall, _base_call.UnaryStreamCall):
"""Used for running a `UnaryStreamCall` wrapped by interceptors."""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
_last_returned_call_from_interceptors = Optional[_base_call.UnaryStreamCall]
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[UnaryStreamClientInterceptor],
request: RequestType, timeout: Optional[float],
metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
self._init_stream_response_mixin()
self._last_returned_call_from_interceptors = None
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[UnaryUnaryClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], request: RequestType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> UnaryStreamCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[UnaryStreamClientInterceptor],
client_call_details: ClientCallDetails,
request: RequestType,
) -> _base_call.UnaryUnaryCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
call_or_response_iterator = await interceptor.intercept_unary_stream(
continuation, client_call_details, request)
if isinstance(call_or_response_iterator,
_base_call.UnaryStreamCall):
self._last_returned_call_from_interceptors = call_or_response_iterator
else:
self._last_returned_call_from_interceptors = UnaryStreamCallResponseIterator(
self._last_returned_call_from_interceptors,
call_or_response_iterator)
return self._last_returned_call_from_interceptors
else:
self._last_returned_call_from_interceptors = UnaryStreamCall(
request, _timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
return self._last_returned_call_from_interceptors
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class InterceptedStreamUnaryCall(_InterceptedUnaryResponseMixin,
_InterceptedStreamRequestMixin,
InterceptedCall, _base_call.StreamUnaryCall):
"""Used for running a `StreamUnaryCall` wrapped by interceptors.
    The `__await__` method is proxied to the intercepted call only once
    the interceptor task is finished.
"""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[StreamUnaryClientInterceptor],
request_iterator: Optional[RequestIterableType],
timeout: Optional[float], metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
request_iterator = self._init_stream_request_mixin(request_iterator)
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request_iterator, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[StreamUnaryClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool],
request_iterator: RequestIterableType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> StreamUnaryCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[UnaryUnaryClientInterceptor],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType
) -> _base_call.StreamUnaryCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
return await interceptor.intercept_stream_unary(
continuation, client_call_details, request_iterator)
else:
return StreamUnaryCall(
request_iterator,
_timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request_iterator)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class InterceptedStreamStreamCall(_InterceptedStreamResponseMixin,
_InterceptedStreamRequestMixin,
InterceptedCall, _base_call.StreamStreamCall):
"""Used for running a `StreamStreamCall` wrapped by interceptors."""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
_last_returned_call_from_interceptors = Optional[_base_call.UnaryStreamCall]
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[StreamStreamClientInterceptor],
request_iterator: Optional[RequestIterableType],
timeout: Optional[float], metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
self._init_stream_response_mixin()
request_iterator = self._init_stream_request_mixin(request_iterator)
self._last_returned_call_from_interceptors = None
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request_iterator, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[StreamStreamClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool],
request_iterator: RequestIterableType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> StreamStreamCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[StreamStreamClientInterceptor],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType
) -> _base_call.StreamStreamCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
call_or_response_iterator = await interceptor.intercept_stream_stream(
continuation, client_call_details, request_iterator)
if isinstance(call_or_response_iterator,
_base_call.StreamStreamCall):
self._last_returned_call_from_interceptors = call_or_response_iterator
else:
self._last_returned_call_from_interceptors = StreamStreamCallResponseIterator(
self._last_returned_call_from_interceptors,
call_or_response_iterator)
return self._last_returned_call_from_interceptors
else:
self._last_returned_call_from_interceptors = StreamStreamCall(
request_iterator,
_timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
return self._last_returned_call_from_interceptors
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request_iterator)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall):
"""Final UnaryUnaryCall class finished with a response."""
_response: ResponseType
def __init__(self, response: ResponseType) -> None:
self._response = response
def cancel(self) -> bool:
return False
def cancelled(self) -> bool:
return False
def done(self) -> bool:
return True
def add_done_callback(self, unused_callback) -> None:
raise NotImplementedError()
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
async def initial_metadata(self) -> Optional[Metadata]:
return None
async def trailing_metadata(self) -> Optional[Metadata]:
return None
async def code(self) -> grpc.StatusCode:
return grpc.StatusCode.OK
async def details(self) -> str:
return ''
async def debug_error_string(self) -> Optional[str]:
return None
def __await__(self):
if False: # pylint: disable=using-constant-test
# This code path is never used, but a yield statement is needed
# for telling the interpreter that __await__ is a generator.
yield None
return self._response
async def wait_for_connection(self) -> None:
pass
class _StreamCallResponseIterator:
_call: Union[_base_call.UnaryStreamCall, _base_call.StreamStreamCall]
_response_iterator: AsyncIterable[ResponseType]
def __init__(self, call: Union[_base_call.UnaryStreamCall,
_base_call.StreamStreamCall],
response_iterator: AsyncIterable[ResponseType]) -> None:
self._response_iterator = response_iterator
self._call = call
def cancel(self) -> bool:
return self._call.cancel()
def cancelled(self) -> bool:
return self._call.cancelled()
def done(self) -> bool:
return self._call.done()
def add_done_callback(self, callback) -> None:
self._call.add_done_callback(callback)
def time_remaining(self) -> Optional[float]:
return self._call.time_remaining()
async def initial_metadata(self) -> Optional[Metadata]:
return await self._call.initial_metadata()
async def trailing_metadata(self) -> Optional[Metadata]:
return await self._call.trailing_metadata()
async def code(self) -> grpc.StatusCode:
return await self._call.code()
async def details(self) -> str:
return await self._call.details()
async def debug_error_string(self) -> Optional[str]:
return await self._call.debug_error_string()
def __aiter__(self):
return self._response_iterator.__aiter__()
async def wait_for_connection(self) -> None:
return await self._call.wait_for_connection()
class UnaryStreamCallResponseIterator(_StreamCallResponseIterator,
_base_call.UnaryStreamCall):
"""UnaryStreamCall class wich uses an alternative response iterator."""
async def read(self) -> ResponseType:
        # Behind the scenes everything goes through the
# async iterator. So this path should not be reached.
raise NotImplementedError()
class StreamStreamCallResponseIterator(_StreamCallResponseIterator,
_base_call.StreamStreamCall):
"""StreamStreamCall class wich uses an alternative response iterator."""
async def read(self) -> ResponseType:
        # Behind the scenes everything goes through the
# async iterator. So this path should not be reached.
raise NotImplementedError()
async def write(self, request: RequestType) -> None:
        # Behind the scenes everything goes through the
# async iterator provided by the InterceptedStreamStreamCall.
# So this path should not be reached.
raise NotImplementedError()
async def done_writing(self) -> None:
        # Behind the scenes everything goes through the
# async iterator provided by the InterceptedStreamStreamCall.
# So this path should not be reached.
raise NotImplementedError()
@property
def _done_writing_flag(self) -> bool:
return self._call._done_writing_flag
|
py | b40aa99520d1d9bee4b31590ab4232daba455b98 | """The exceptions used by Home Assistant."""
from __future__ import annotations
from typing import TYPE_CHECKING, Generator, Sequence
import attr
if TYPE_CHECKING:
from .core import Context
class HomeAssistantError(Exception):
"""General Home Assistant exception occurred."""
class InvalidEntityFormatError(HomeAssistantError):
"""When an invalid formatted entity is encountered."""
class NoEntitySpecifiedError(HomeAssistantError):
"""When no entity is specified."""
class TemplateError(HomeAssistantError):
"""Error during template rendering."""
def __init__(self, exception: Exception) -> None:
"""Init the error."""
super().__init__(f"{exception.__class__.__name__}: {exception}")
@attr.s
class ConditionError(HomeAssistantError):
"""Error during condition evaluation."""
# The type of the failed condition, such as 'and' or 'numeric_state'
type: str = attr.ib()
@staticmethod
def _indent(indent: int, message: str) -> str:
"""Return indentation."""
return " " * indent + message
def output(self, indent: int) -> Generator:
"""Yield an indented representation."""
raise NotImplementedError()
def __str__(self) -> str:
"""Return string representation."""
return "\n".join(list(self.output(indent=0)))
@attr.s
class ConditionErrorMessage(ConditionError):
"""Condition error message."""
# A message describing this error
message: str = attr.ib()
def output(self, indent: int) -> Generator:
"""Yield an indented representation."""
yield self._indent(indent, f"In '{self.type}' condition: {self.message}")
@attr.s
class ConditionErrorIndex(ConditionError):
"""Condition error with index."""
# The zero-based index of the failed condition, for conditions with multiple parts
index: int = attr.ib()
# The total number of parts in this condition, including non-failed parts
total: int = attr.ib()
# The error that this error wraps
error: ConditionError = attr.ib()
def output(self, indent: int) -> Generator:
"""Yield an indented representation."""
if self.total > 1:
yield self._indent(
indent, f"In '{self.type}' (item {self.index+1} of {self.total}):"
)
else:
yield self._indent(indent, f"In '{self.type}':")
yield from self.error.output(indent + 1)
@attr.s
class ConditionErrorContainer(ConditionError):
"""Condition error with subconditions."""
# List of ConditionErrors that this error wraps
errors: Sequence[ConditionError] = attr.ib()
def output(self, indent: int) -> Generator:
"""Yield an indented representation."""
for item in self.errors:
yield from item.output(indent)
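# Illustrative sketch of how the condition error classes above compose: a
# ConditionErrorIndex wrapping a ConditionErrorMessage renders as an indented,
# multi-line report via __str__ (the exact indentation comes from _indent):
#
#   err = ConditionErrorIndex(
#       "and",
#       index=0,
#       total=2,
#       error=ConditionErrorMessage("numeric_state", "value out of range"),
#   )
#   str(err)
#   # In 'and' (item 1 of 2):
#   #   In 'numeric_state' condition: value out of range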
class PlatformNotReady(HomeAssistantError):
"""Error to indicate that platform is not ready."""
class ConfigEntryNotReady(HomeAssistantError):
"""Error to indicate that config entry is not ready."""
class InvalidStateError(HomeAssistantError):
"""When an invalid state is encountered."""
class Unauthorized(HomeAssistantError):
"""When an action is unauthorized."""
def __init__(
self,
context: Context | None = None,
user_id: str | None = None,
entity_id: str | None = None,
config_entry_id: str | None = None,
perm_category: str | None = None,
permission: str | None = None,
) -> None:
"""Unauthorized error."""
super().__init__(self.__class__.__name__)
self.context = context
if user_id is None and context is not None:
user_id = context.user_id
self.user_id = user_id
self.entity_id = entity_id
self.config_entry_id = config_entry_id
# Not all actions have an ID (like adding config entry)
# We then use this fallback to know what category was unauth
self.perm_category = perm_category
self.permission = permission
class UnknownUser(Unauthorized):
"""When call is made with user ID that doesn't exist."""
class ServiceNotFound(HomeAssistantError):
"""Raised when a service is not found."""
def __init__(self, domain: str, service: str) -> None:
"""Initialize error."""
super().__init__(self, f"Service {domain}.{service} not found")
self.domain = domain
self.service = service
def __str__(self) -> str:
"""Return string representation."""
return f"Unable to find service {self.domain}.{self.service}"
class MaxLengthExceeded(HomeAssistantError):
"""Raised when a property value has exceeded the max character length."""
def __init__(self, value: str, property_name: str, max_length: int) -> None:
"""Initialize error."""
        super().__init__(
(
f"Value {value} for property {property_name} has a max length of "
f"{max_length} characters"
),
)
self.value = value
self.property_name = property_name
self.max_length = max_length
|
py | b40aaa31ac773dbd8020a2c6a6873aaf007cceb0 | import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import plotly.graph_objects as go
from visualization import radar_capability, radar_capability_comparison, \
off_def_plot, team_summary, scale_player, scale_team
import pickle
from data import fantasy_team_stats, Player_ODFAI, results
from data import C_list, PF_list, PG_list, SF_list, SG_list
from dash.exceptions import PreventUpdate
# player_name = ["name1", "name2", "name3", "name4", "name5",
# "name6", "name7", "name8", "name9", "name10",
# "name11", "name12", "name13", "name14", "name15"]
# player1 = {"Offensive": 20, "Defensive": 15, "Aggression": 3, "Control":20, "Form Stability": 5}
# player2 = {"Offensive": 16, "Defensive": 30, "Aggression": 4, "Control":28, "Form Stability": 3}
# player3 = {"Offensive": 26, "Defensive": 20, "Aggression": 6, "Control":18, "Form Stability": 2}
# player4 = {"Offensive": 10, "Defensive": 40, "Aggression": 2, "Control":18, "Form Stability": 6}
# player5 = {"Offensive": 14, "Defensive": 30, "Aggression": 3, "Control":13, "Form Stability": 7}
# player6 = {"Offensive": 20, "Defensive": 15, "Aggression": 3, "Control":20, "Form Stability": 5}
# player7 = {"Offensive": 16, "Defensive": 30, "Aggression": 4, "Control":28, "Form Stability": 3}
# player8 = {"Offensive": 26, "Defensive": 20, "Aggression": 6, "Control":18, "Form Stability": 2}
# player9 = {"Offensive": 10, "Defensive": 40, "Aggression": 2, "Control":18, "Form Stability": 6}
# player10 = {"Offensive": 14, "Defensive": 30, "Aggression": 3, "Control":13, "Form Stability": 7}
# player11 = {"Offensive": 20, "Defensive": 15, "Aggression": 3, "Control":20, "Form Stability": 5}
# player12 = {"Offensive": 16, "Defensive": 30, "Aggression": 4, "Control":28, "Form Stability": 3}
# player13 = {"Offensive": 26, "Defensive": 20, "Aggression": 6, "Control":18, "Form Stability": 2}
# player14 = {"Offensive": 10, "Defensive": 40, "Aggression": 2, "Control":18, "Form Stability": 6}
# player15 = {"Offensive": 14, "Defensive": 30, "Aggression": 3, "Control":13, "Form Stability": 7}
#
# test = pd.DataFrame([player1, player2, player3, player4, player5,
# player6, player7, player8, player9, player10,
# player11, player12, player13, player14, player15], index=player_name)
#
player_cap = pd.read_csv('data/player_cap_df.csv', index_col="Player")
player_name = list(player_cap.index)
# player_df_scaled = scale_visualization_data(player_cap)
# player1_od = {"Offensive": 20, "Defensive": 15}
# player2_od = {"Offensive": 16, "Defensive": 30}
# player3_od = {"Offensive": 26, "Defensive": 20}
# player4_od = {"Offensive": 10, "Defensive": 40}
# player5_od = {"Offensive": 14, "Defensive": 30}
# player6_od = {"Offensive": 20, "Defensive": 22}
# player7_od = {"Offensive": 16, "Defensive": 17}
# player8_od = {"Offensive": 26, "Defensive": 23}
# player9_od = {"Offensive": 10, "Defensive": 25}
# player10_od = {"Offensive": 22, "Defensive": 19}
# player11_od = {"Offensive": 17, "Defensive": 18}
# player12_od = {"Offensive": 23, "Defensive": 23}
# player13_od = {"Offensive": 19, "Defensive": 26}
# player14_od = {"Offensive": 11, "Defensive": 22}
# player15_od = {"Offensive": 26, "Defensive": 16}
#
# test_od = pd.DataFrame([player1_od,player2_od,player3_od,player4_od,player5_od,
# player6_od, player7_od,player8_od,player9_od,player10_od,
# player11_od, player12_od,player13_od,player14_od,player15_od], index = player_name)
#
# player_od_df = test_od.copy()
#
# team1 = {"Offensive": 20, "Defensive": 15, "Aggression": 3, "Control":20, "Form Stability": 5, "Cooperation": 10}
player_od_df = player_cap[["Offense", "Defense"]]
################################################
COLORS = ['rgb(67,67,67)', 'rgb(115,115,115)', 'rgb(49,130,189)', 'rgb(189,189,189)']
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css', '/assets/coff.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
def page_header():
"""
Returns the page header as a dash `html.Div`
"""
return html.Div(id='header', children=[
html.Div([html.H3('Visualization and Simulation of NBA Player Capability Using 2019-2020 NBA Stats')],
className="ten columns"),
html.A([html.Img(id='logo', src=app.get_asset_url('jack.jpeg'),
style={'height': '35px', 'paddingTop': '7%', 'paddingRight': '300px'}),
html.Span('NBA Git Repo', style={'fontSize': '2rem', 'height': '35px', 'bottom': 0,
'paddingLeft': '1px', 'color': '#a3a7b0',
'textDecoration': 'none'})],
className="two columns row",
href='https://github.com/data1050projectfall2019/data1050project'),
html.B([html.Img(id='nba_logo', src=app.get_asset_url('nba-logo.jpg'),
style={'height': '100px', 'paddingTop': '1%'}),
html.Span(' ', style={'fontSize': '2rem', 'height': '100px', 'bottom': 0,
'paddingLeft': '4px', 'color': '#a3a7b0',
'textDecoration': 'none'})],
className="two columns row"),
html.Div(children=[dcc.Markdown('''
---- CTRL-C, CTRL-V:
[About Page](https://docs.google.com/document/d/1cE0z6fRTA5pGp01ROxbX_DoFLjuWJOGx4U9gCLJSLKk/edit?usp=sharing),
[Additional Details](https://docs.google.com/document/d/1gKH3nA29nzM36KF6Bn30TmFd7xrYvLhz_bOSfw_A8tc/edit?usp=sharing)
''', className='eleven columns', style={'paddingLeft': '5%'})], className="row")
], className="row")
def description():
"""
Returns overall project description
"""
return html.Div(children=[
dcc.Markdown('''
## NBA Player Capability Visualization
Shiyu Liu | Peter Huang | Yi Wang
        All sports, whether individual or team sports, generate a large amount of data from even a single match.
        Sports fans, media, bookmakers, and team administrators examine these data for a variety of needs. Since such
        raw data are merely numbers that are hard to comprehend and interpret, statistical analysis and
        corresponding result visualization become the most crucial part of utilizing the data. In this project, we
aim to collect, store, analyze, and visualize NBA player match statistics. To provide users with more flexible
results, we expect our application to provide player capability visualization and comparisons based on users’
queries. This application also enables the users to build their own team, and simulate a match between two teams.
#### Data Source
Data is obtained from https://www.basketball-reference.com/leagues/NBA_2020_per_game.html.
        ** The data is updated every day after all matches have been completed on that day. For demonstration and future usage,
        we update the data every 30 seconds**.
Our data include all registered players. You can select two of them to compare their performance below.
        Please note: offense/defense considers a player's effect on team offense/defense as well as the overall
        team offense/defense performance. Impact represents a player's contribution to the team and his influence
        on the opponent. Scoring considers both a player's efficiency and his individual overall scores. Control represents
        a player's stability and accuracy on the field. We recommend looking at a player's impact when evaluating his
        influence in the NBA.
''', className='eleven columns', style={'paddingLeft': '5%'})], className="row")
def select_one_or_two_player():
"""Select one or two players from the player list"""
return html.Div(children=[
html.Div(id='above_drop_down', style={'marginTop': '2rem'}),
html.Div(children=[
html.Div(children=[
dcc.Markdown('''Please select a player to visualize his performance:'''),
dcc.Dropdown(
options=[{"label": name, "value": name} for name in player_name],
value=[],
multi=False,
id="player1_name",
style={'height': '30px', 'width': '300px'}
)], style={'width': '300px', 'marginLeft': '90px', 'display': 'inline-block'}),
html.Div(children=[
dcc.Markdown('''Please select a second player to compare their performance:'''),
dcc.Dropdown(
options=[{"label": name, "value": name} for name in player_name],
value=[],
multi=False,
id="player2_name",
style={'height': '30px', 'width': '300px'}
)], style={'width': '300px', 'align': 'right', 'marginLeft': '400px', 'display': 'inline-block'})
]),
html.Div(id='below_first_drop_down', style={'marginTop': '2rem'}),
html.Div(children=[html.Div(children=[dcc.Graph(id='ranking1')], style={'width': '48%', 'align': 'right', 'display': 'inline-block'}),
html.Div(children=[dcc.Graph(id='ranking2')],
style={'width': '48%', 'align': 'right', 'display': 'inline-block'})], className='eleven columns'),
html.Div(id='below_drop_down', style={'marginTop': '2rem'}),
html.Div(children=[dcc.Graph(id='what-if-figure')], className='eleven columns'),
html.Div(id='below_visual', style={'marginTop': '2rem'})
], style={'marginLeft': '80px'})
def enhancement_description():
"""
Returns enhancement part description
"""
return html.Div(children=[dcc.Markdown('''
### Fantasy Team
    Finished viewing the players? Now select and build your team. You can build two teams, and our
    application will visualize the overall team summary using a radar plot. This plot is a
    comprehensive estimate based on the players you selected. After selecting all ten players, click 'Simulate
    the Match', and you will be able to view the match result as well as the scores.
''', className='eleven columns', style={'paddingLeft': '5%'})], className="row")
# c = ['name1', 'name2', 'name3']
# pf = ['name4', 'name5', 'name6']
# sf = ['name7', 'name8', 'name9']
# sg = ['name10', 'name11', 'name12']
# pg = ['name13', 'name14', 'name15']
c = C_list
pf = PF_list
sf = SF_list
sg = SG_list
pg = PG_list
def enhancement_team_what_if():
return html.Div(children=[
html.Div(children=[
html.Div(children=[
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in c],
multi=False,
id="c",
placeholder="Select a Center",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in pf],
multi=False,
id="pf",
placeholder="Select a Power Forward",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in sf],
multi=False,
id="sf",
placeholder="Select a Small Forward",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in sg],
multi=False,
id="sg",
placeholder="Select a Shooting Guard",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in pg],
multi=False,
id="pg",
placeholder="Select a Point Guard",
style={'height': '0px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'})], style={'marginTop': '3.5rem', 'marginLeft': '80px', 'display': 'inline-block'}),
html.Div(children=[
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in c],
multi=False,
id="c2",
placeholder="Select a Center",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in pf],
multi=False,
id="pf2",
placeholder="Select a Power Forward",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in sf],
multi=False,
id="sf2",
placeholder="Select a Small Forward",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in sg],
multi=False,
id="sg2",
placeholder="Select a Shooting Guard",
style={'height': '20px', 'width': '500px'}
)
], style={'width': '200px', 'marginTop': '3.5rem'}),
html.Div(children=[
dcc.Dropdown(
options=[{"label": name, "value": name} for name in pg],
multi=False,
id="pg2",
placeholder="Select a Point Guard",
style={'height': '0px', 'width': '500px'}
)
], style={'marginTop': '3.5rem'})],
style={'width': '300px', 'align': 'right', 'marginLeft': '450px', 'display': 'inline-block'}),
]),
html.Div(id='below 2 team', style={'marginTop': '8rem'}),
html.Div(children=[html.Div(children=[dcc.Graph(id='team_summary1')],
style={'width': '42%', 'align': 'right', 'display': 'inline-block'}),
html.Div(children=[dcc.Graph(id='team_summary2')],
style={'width': '42%', 'align': 'right', 'display': 'inline-block'})],
className='eleven columns')
], style={'marginLeft': '80px'})
def start_match():
return html.Div(children=[
html.Div(id='above the button', style={'marginTop': '8rem'}),
html.Button('Simulate the Match!', id='button', style={'marginLeft': '450px', 'color': 'white', "backgroundColor":'blue'}),
html.Div(id='output-container-button',
children='After selecting all players, start the match and view the result',
style={'marginLeft': '350px', 'color': 'white', 'fontSize': '2rem'})
], style={'marginLeft': '80px'})
def architecture_summary():
"""
Returns the text and image of architecture summary of the project.
"""
return html.Div(children=[
dcc.Markdown('''
# Project Architecture
This project uses MongoDB as the database. All data acquired are stored in raw form to the
database (with de-duplication). An abstract layer is built in `database.py` so all queries
can be done via function call. A `dash` app and `get_data.py` file are serving this web page
through. Actions on responsive components on the page is redirected to `app.py` which will
then update certain components on the page. Such operations are supported by `data.py` and
'visualization.py' for processing and calculation.
''', className='row eleven columns', style={'paddingLeft': '5%'}),
html.Div(children=[
html.Img(
# src="https://docs.google.com/drawings/d/e/2PACX-1vQNerIIsLZU2zMdRhIl3ZZkDMIt7jhE_fjZ6ZxhnJ9bKe1emPcjI92lT5L7aZRYVhJgPZ7EURN0AqRh/pub?w=670&h=457",
src="assets/archi.png",
className='row'),
], className='row', style={'display': 'inline-block', 'width': '48%'}),
dcc.Markdown('''
''')
], className='row')
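# Illustrative sketch (hypothetical helper name) of the database abstraction
# described in architecture_summary() above: callbacks never talk to MongoDB
# directly, they call plain functions exposed by `database.py`, e.g.
#
#   from database import fetch_all           # hypothetical query helper
#   player_rows = fetch_all("player_stats")  # list of dicts from MongoDB
#   player_df = pd.DataFrame(player_rows)
#
# so swapping the storage backend only requires changing `database.py`.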
app.layout = html.Div([
page_header(),
html.Hr(),
description(),
select_one_or_two_player(),
enhancement_description(),
enhancement_team_what_if(),
start_match(),
architecture_summary()
])
@app.callback(
dash.dependencies.Output('ranking1', 'figure'),
[dash.dependencies.Input("player1_name", 'value')]
)
def display_ranking1(player1):
if player1:
o, d, cur = off_def_plot(player1, player_od_df)
return cur
else:
fig = go.Figure()
fig.update_layout(template="plotly_dark", width=500, height=280,
title="No Info", xaxis=dict(showticklabels=False),
yaxis=dict(showticklabels=False))
return fig
@app.callback(
dash.dependencies.Output('ranking2', 'figure'),
[dash.dependencies.Input("player2_name", 'value')]
)
def display_ranking2(player2):
if player2:
o, d, cur = off_def_plot(player2, player_od_df)
return cur
else:
fig = go.Figure()
fig.update_layout(template="plotly_dark", width=500,
height=280, title="No Info", xaxis=dict(showticklabels=False),
yaxis=dict(showticklabels=False))
return fig
@app.callback(
dash.dependencies.Output('what-if-figure', 'figure'),
[dash.dependencies.Input("player1_name", 'value'),
dash.dependencies.Input("player2_name", 'value')]
)
def what_if_handler(player1, player2):
"""Changes the display graph based on player input"""
if player1 and player2:
player1_result, feature_list = scale_player(player1, Player_ODFAI(player1))
player2_result, feature_list = scale_player(player2, Player_ODFAI(player2))
cur = radar_capability_comparison(feature_list, player1_result,
player2_result, [player1, player2])
elif player1:
player1_result, feature_list = scale_player(player1, Player_ODFAI(player1))
cur = radar_capability(feature_list, player1_result, player1)
elif player2:
player2_result, feature_list = scale_player(player2, Player_ODFAI(player2))
cur = radar_capability(feature_list, player2_result, player2)
else:
cur = radar_capability(['Control', 'Defensive', 'Impact', 'Offense', 'Scoring'], [-0.1, -0.1, -0.1, -0.1, -0.1], "No Player Info")
return cur
@app.callback(
dash.dependencies.Output('team_summary1', 'figure'),
[dash.dependencies.Input("pg", 'value'),
dash.dependencies.Input("sg", 'value'),
dash.dependencies.Input("sf", 'value'),
dash.dependencies.Input("pf", 'value'),
dash.dependencies.Input("c", 'value')]
)
def display_team1(name1, name2, name3, name4, name5):
t0 = {"Offense": -5, "Defense": -5, "Scoring": -5, "Control": -5, "Impact": -5, "Tacit": -5, "Shooting": -5}
if name1 or name2 or name3 or name4 or name5:
summary0 = fantasy_team_stats(name1, name2, name3, name4, name5)
summary1 = scale_team("TEAM 1", summary0)
cur = team_summary(summary1, "TEAM 1")
else:
cur = team_summary(t0, "TEAM 1")
return cur
@app.callback(
dash.dependencies.Output('team_summary2', 'figure'),
[dash.dependencies.Input("pg2", 'value'),
dash.dependencies.Input("sg2", 'value'),
dash.dependencies.Input("sf2", 'value'),
dash.dependencies.Input("pf2", 'value'),
dash.dependencies.Input("c2", 'value')]
)
def display_team2(name1, name2, name3, name4, name5):
t0 = {"Offense": -5, "Defense": -5, "Scoring": -5, "Control": -5, "Impact": -5, "Tacit": -5, "Shooting": -5}
if name1 or name2 or name3 or name4 or name5:
summary0 = fantasy_team_stats(name1, name2, name3, name4, name5)
summary2 = scale_team("TEAM 1", summary0)
cur = team_summary(summary2, "TEAM 2")
else:
cur = team_summary(t0, "TEAM 2")
return cur
@app.callback(
Output(component_id='output-container-button', component_property='children'),
[dash.dependencies.Input(component_id='button', component_property='n_clicks')],
[
dash.dependencies.State("pg", 'value'),
dash.dependencies.State("sg", 'value'),
dash.dependencies.State("sf", 'value'),
dash.dependencies.State("pf", 'value'),
dash.dependencies.State("c", 'value'),
dash.dependencies.State("pg2", 'value'),
dash.dependencies.State("sg2", 'value'),
dash.dependencies.State("sf2", 'value'),
dash.dependencies.State("pf2", 'value'),
dash.dependencies.State("c2", 'value')
]
)
def update_output(n_clicks, player1, player2, player3, player4, player5, player6, player7, player8, player9, player10):
if n_clicks is None:
raise PreventUpdate
else:
if player1 and player2 and player3 and player4 and player5 and player6 and player7 and player8 and player9 and player10:
ability1, ability2, score1, score2, result = results(player1, player2, player3, player4,
player5, player6, player7, player8, player9, player10)
return u'''
{}, the score is {} to {}
'''.format(result, score1, score2)
else:
return "You have to select all 10 players"
if __name__ == '__main__':
app.run_server(debug=True) |
py | b40aaa57dee7b518e18a69200f961500f37cb60a | # -*- coding: utf-8 -*-
"""
ultratb.py -- Spice up your tracebacks!
* ColorTB
I've always found it a bit hard to visually parse tracebacks in Python. The
ColorTB class is a solution to that problem. It colors the different parts of a
traceback in a manner similar to what you would expect from a syntax-highlighting
text editor.
Installation instructions for ColorTB:
import sys,ultratb
sys.excepthook = ultratb.ColorTB()
* VerboseTB
I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
of useful info when a traceback occurs. Ping originally had it spit out HTML
and intended it for CGI programmers, but why should they have all the fun? I
altered it to spit out colored text to the terminal. It's a bit overwhelming,
but kind of neat, and maybe useful for long-running programs that you believe
are bug-free. If a crash *does* occur in that type of program you want details.
Give it a shot--you'll love it or you'll hate it.
Note:
The Verbose mode prints the variables currently visible where the exception
happened (shortening their strings if too long). This can potentially be
very slow, if you happen to have a huge data structure whose string
representation is complex to compute. Your computer may appear to freeze for
a while with cpu usage at 100%. If this occurs, you can cancel the traceback
with Ctrl-C (maybe hitting it more than once).
If you encounter this kind of situation often, you may want to use the
Verbose_novars mode instead of the regular Verbose, which avoids formatting
variables (but otherwise includes the information and context given by
Verbose).
Installation instructions for VerboseTB:
import sys,ultratb
sys.excepthook = ultratb.VerboseTB()
Note: Much of the code in this module was lifted verbatim from the standard
library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
* Color schemes
The colors are defined in the class TBTools through the use of the
ColorSchemeTable class. Currently the following exist:
- NoColor: allows all of this module to be used in any terminal (the color
escapes are just dummy blank strings).
- Linux: is meant to look good in a terminal like the Linux console (black
or very dark background).
- LightBG: similar to Linux but swaps dark/light colors to be more readable
in light background terminals.
You can implement other color schemes easily, the syntax is fairly
self-explanatory. Please send back new schemes you develop to the author for
possible inclusion in future releases.
"""
#*****************************************************************************
# Copyright (C) 2001 Nathaniel Gray <[email protected]>
# Copyright (C) 2001-2004 Fernando Perez <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
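# Illustrative sketch: switching color schemes at runtime with the handlers
# defined below (set_colors lives on TBTools, so every handler supports it):
#
#   import sys, ultratb
#   sys.excepthook = ultratb.ColorTB()
#   sys.excepthook.set_colors('LightBG')   # swap to the light-background scheme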
from __future__ import with_statement
import inspect
import keyword
import linecache
import os
import pydoc
import re
import string
import sys
import time
import tokenize
import traceback
import types
# For purposes of monkeypatching inspect to fix a bug in it.
from inspect import getsourcefile, getfile, getmodule,\
ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode
# IPython's own modules
# Modified pdb which doesn't damage IPython's readline handling
from IPython.core import debugger, ipapi
from IPython.core.display_trap import DisplayTrap
from IPython.core.excolors import exception_colors
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils.data import uniq_stable
from IPython.utils.warn import info, error
# Globals
# amount of space to put line numbers before verbose tracebacks
INDENT_SIZE = 8
# Default color scheme. This is used, for example, by the traceback
# formatter. When running in an actual IPython instance, the user's rc.colors
# value is used, but having a module global makes this functionality available
# to users of ultratb who are NOT running inside ipython.
DEFAULT_SCHEME = 'NoColor'
#---------------------------------------------------------------------------
# Code begins
# Utility functions
def inspect_error():
"""Print a message about internal inspect errors.
These are unfortunately quite common."""
error('Internal Python error in the inspect module.\n'
'Below is the traceback from this internal error.\n')
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved.
FIXED version with which we monkeypatch the stdlib to work around a bug."""
file = getsourcefile(object) or getfile(object)
# If the object is a frame, then trying to get the globals dict from its
# module won't work. Instead, the frame object itself has the globals
# dictionary.
globals_dict = None
if inspect.isframe(object):
# XXX: can this ever be false?
globals_dict = object.f_globals
else:
module = getmodule(object, file)
if module:
globals_dict = module.__dict__
lines = linecache.getlines(file, globals_dict)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise IOError('could not find class definition')
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError('could not find function definition')
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
pmatch = pat.match
# fperez - fix: sometimes, co_firstlineno can give a number larger than
# the length of lines, which causes an error. Safeguard against that.
lnum = min(object.co_firstlineno,len(lines))-1
while lnum > 0:
if pmatch(lines[lnum]): break
lnum -= 1
return lines, lnum
raise IOError('could not find code object')
# Monkeypatch inspect to apply our bugfix. This code only works with py25
if sys.version_info[:2] >= (2,5):
inspect.findsource = findsource
def fix_frame_records_filenames(records):
"""Try to fix the filenames in each record from inspect.getinnerframes().
Particularly, modules loaded from within zip files have useless filenames
attached to their code object, and inspect.getinnerframes() just uses it.
"""
fixed_records = []
for frame, filename, line_no, func_name, lines, index in records:
# Look inside the frame's globals dictionary for __file__, which should
# be better.
better_fn = frame.f_globals.get('__file__', None)
if isinstance(better_fn, str):
# Check the type just in case someone did something weird with
# __file__. It might also be None if the error occurred during
# import.
filename = better_fn
fixed_records.append((frame, filename, line_no, func_name, lines, index))
return fixed_records
def _fixed_getinnerframes(etb, context=1,tb_offset=0):
import linecache
LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
# If the error is at the console, don't build any context, since it would
# otherwise produce 5 blank lines printed out (there is no file at the
# console)
rec_check = records[tb_offset:]
try:
rname = rec_check[0][1]
if rname == '<ipython console>' or rname.endswith('<string>'):
return rec_check
except IndexError:
pass
aux = traceback.extract_tb(etb)
assert len(records) == len(aux)
for i, (file, lnum, _, _) in zip(range(len(records)), aux):
maybeStart = lnum-1 - context//2
start = max(maybeStart, 0)
end = start + context
lines = linecache.getlines(file)[start:end]
# pad with empty lines if necessary
if maybeStart < 0:
lines = (['\n'] * -maybeStart) + lines
if len(lines) < context:
lines += ['\n'] * (context - len(lines))
buf = list(records[i])
buf[LNUM_POS] = lnum
buf[INDEX_POS] = lnum - 1 - start
buf[LINES_POS] = lines
records[i] = tuple(buf)
return records[tb_offset:]
# Helper function -- largely belongs to VerboseTB, but we need the same
# functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
# can be recognized properly by ipython.el's py-traceback-line-re
# (SyntaxErrors have to be treated specially because they have no traceback)
_parser = PyColorize.Parser()
def _format_traceback_lines(lnum, index, lines, Colors, lvals=None,scheme=None):
numbers_width = INDENT_SIZE - 1
res = []
i = lnum - index
# This lets us get fully syntax-highlighted tracebacks.
if scheme is None:
ipinst = ipapi.get()
if ipinst is not None:
scheme = ipinst.colors
else:
scheme = DEFAULT_SCHEME
_line_format = _parser.format2
for line in lines:
new_line, err = _line_format(line,'str',scheme)
if not err: line = new_line
if i == lnum:
# This is the line with the error
pad = numbers_width - len(str(i))
if pad >= 3:
marker = '-'*(pad-3) + '-> '
elif pad == 2:
marker = '> '
elif pad == 1:
marker = '>'
else:
marker = ''
num = marker + str(i)
line = '%s%s%s %s%s' %(Colors.linenoEm, num,
Colors.line, line, Colors.Normal)
else:
num = '%*s' % (numbers_width,i)
line = '%s%s%s %s' %(Colors.lineno, num,
Colors.Normal, line)
res.append(line)
if lvals and i == lnum:
res.append(lvals + '\n')
i = i + 1
return res
#---------------------------------------------------------------------------
# Module classes
class TBTools(object):
"""Basic tools used by all traceback printer classes."""
# Number of frames to skip when reporting tracebacks
tb_offset = 0
def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None):
# Whether to call the interactive pdb debugger after printing
# tracebacks or not
self.call_pdb = call_pdb
# Output stream to write to. Note that we store the original value in
# a private attribute and then make the public ostream a property, so
# that we can delay accessing io.Term.cout until runtime. The way
# things are written now, the Term.cout object is dynamically managed
# so a reference to it should NEVER be stored statically. This
# property approach confines this detail to a single location, and all
# subclasses can simply access self.ostream for writing.
self._ostream = ostream
# Create color table
self.color_scheme_table = exception_colors()
self.set_colors(color_scheme)
self.old_scheme = color_scheme # save initial value for toggles
if call_pdb:
self.pdb = debugger.Pdb(self.color_scheme_table.active_scheme_name)
else:
self.pdb = None
def _get_ostream(self):
"""Output stream that exceptions are written to.
Valid values are:
- None: the default, which means that IPython will dynamically resolve
to io.Term.cout. This ensures compatibility with most tools, including
Windows (where plain stdout doesn't recognize ANSI escapes).
- Any object with 'write' and 'flush' attributes.
"""
return io.Term.cout if self._ostream is None else self._ostream
def _set_ostream(self, val):
assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
self._ostream = val
ostream = property(_get_ostream, _set_ostream)
def set_colors(self,*args,**kw):
"""Shorthand access to the color table scheme selector method."""
# Set own color table
self.color_scheme_table.set_active_scheme(*args,**kw)
# for convenience, set Colors to the active scheme
self.Colors = self.color_scheme_table.active_colors
# Also set colors of debugger
if hasattr(self,'pdb') and self.pdb is not None:
self.pdb.set_colors(*args,**kw)
def color_toggle(self):
"""Toggle between the currently active color scheme and NoColor."""
if self.color_scheme_table.active_scheme_name == 'NoColor':
self.color_scheme_table.set_active_scheme(self.old_scheme)
self.Colors = self.color_scheme_table.active_colors
else:
self.old_scheme = self.color_scheme_table.active_scheme_name
self.color_scheme_table.set_active_scheme('NoColor')
self.Colors = self.color_scheme_table.active_colors
def stb2text(self, stb):
"""Convert a structured traceback (a list) to a string."""
return '\n'.join(stb)
def text(self, etype, value, tb, tb_offset=None, context=5):
"""Return formatted traceback.
Subclasses may override this if they add extra arguments.
"""
tb_list = self.structured_traceback(etype, value, tb,
tb_offset, context)
return self.stb2text(tb_list)
def structured_traceback(self, etype, evalue, tb, tb_offset=None,
context=5, mode=None):
"""Return a list of traceback frames.
Must be implemented by each class.
"""
raise NotImplementedError()
#---------------------------------------------------------------------------
class ListTB(TBTools):
"""Print traceback information from a traceback list, with optional color.
Calling: requires 3 arguments:
(etype, evalue, elist)
as would be obtained by:
etype, evalue, tb = sys.exc_info()
if tb:
elist = traceback.extract_tb(tb)
else:
elist = None
It can thus be used by programs which need to process the traceback before
printing (such as console replacements based on the code module from the
standard library).
Because they are meant to be called without a full traceback (only a
list), instances of this class can't call the interactive pdb debugger."""
def __init__(self,color_scheme = 'NoColor', call_pdb=False, ostream=None):
TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
ostream=ostream)
def __call__(self, etype, value, elist):
self.ostream.flush()
self.ostream.write(self.text(etype, value, elist))
self.ostream.write('\n')
def structured_traceback(self, etype, value, elist, tb_offset=None,
context=5):
"""Return a color formatted string with the traceback info.
Parameters
----------
etype : exception type
Type of the exception raised.
value : object
Data stored in the exception
elist : list
List of frames, see class docstring for details.
tb_offset : int, optional
Number of frames in the traceback to skip. If not given, the
instance value is used (set in constructor).
context : int, optional
Number of lines of context information to print.
Returns
-------
String with formatted exception.
"""
tb_offset = self.tb_offset if tb_offset is None else tb_offset
Colors = self.Colors
out_list = []
if elist:
if tb_offset and len(elist) > tb_offset:
elist = elist[tb_offset:]
out_list.append('Traceback %s(most recent call last)%s:' %
(Colors.normalEm, Colors.Normal) + '\n')
out_list.extend(self._format_list(elist))
# The exception info should be a single entry in the list.
lines = ''.join(self._format_exception_only(etype, value))
out_list.append(lines)
# Note: this code originally read:
## for line in lines[:-1]:
## out_list.append(" "+line)
## out_list.append(lines[-1])
# This means it was indenting everything but the last line by a little
        # bit. I've disabled this for now, but if we see ugliness somewhere we
# can restore it.
return out_list
def _format_list(self, extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
Lifted almost verbatim from traceback.py
"""
Colors = self.Colors
list = []
for filename, lineno, name, line in extracted_list[:-1]:
item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \
(Colors.filename, filename, Colors.Normal,
Colors.lineno, lineno, Colors.Normal,
Colors.name, name, Colors.Normal)
if line:
item = item + ' %s\n' % line.strip()
list.append(item)
# Emphasize the last entry
filename, lineno, name, line = extracted_list[-1]
item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \
(Colors.normalEm,
Colors.filenameEm, filename, Colors.normalEm,
Colors.linenoEm, lineno, Colors.normalEm,
Colors.nameEm, name, Colors.normalEm,
Colors.Normal)
if line:
item = item + '%s %s%s\n' % (Colors.line, line.strip(),
Colors.Normal)
list.append(item)
#from pprint import pformat; print 'LISTTB', pformat(list) # dbg
return list
def _format_exception_only(self, etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.exc_info()[:2]. The return value is a list of strings, each ending
in a newline. Normally, the list contains a single string; however,
for SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax error
        occurred. The message indicating which exception occurred is always the
        last string in the list.
Also lifted nearly verbatim from traceback.py
"""
have_filedata = False
Colors = self.Colors
list = []
try:
stype = Colors.excName + etype.__name__ + Colors.Normal
except AttributeError:
stype = etype # String exceptions don't get special coloring
if value is None:
list.append( str(stype) + '\n')
else:
if etype is SyntaxError:
try:
msg, (filename, lineno, offset, line) = value
except:
have_filedata = False
else:
have_filedata = True
#print 'filename is',filename # dbg
if not filename: filename = "<string>"
list.append('%s File %s"%s"%s, line %s%d%s\n' % \
(Colors.normalEm,
Colors.filenameEm, filename, Colors.normalEm,
Colors.linenoEm, lineno, Colors.Normal ))
if line is not None:
i = 0
while i < len(line) and line[i].isspace():
i = i+1
list.append('%s %s%s\n' % (Colors.line,
line.strip(),
Colors.Normal))
if offset is not None:
s = ' '
for c in line[i:offset-1]:
if c.isspace():
s = s + c
else:
s = s + ' '
list.append('%s%s^%s\n' % (Colors.caret, s,
Colors.Normal) )
value = msg
s = self._some_str(value)
if s:
list.append('%s%s:%s %s\n' % (str(stype), Colors.excName,
Colors.Normal, s))
else:
list.append('%s\n' % str(stype))
# sync with user hooks
if have_filedata:
ipinst = ipapi.get()
if ipinst is not None:
ipinst.hooks.synchronize_with_editor(filename, lineno, 0)
return list
def get_exception_only(self, etype, value):
"""Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
value : exception value
"""
return ListTB.structured_traceback(self, etype, value, [])
def show_exception_only(self, etype, evalue):
"""Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
value : exception value
"""
# This method needs to use __call__ from *this* class, not the one from
# a subclass whose signature or behavior may be different
ostream = self.ostream
ostream.flush()
ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
ostream.flush()
def _some_str(self, value):
# Lifted from traceback.py
try:
return str(value)
except:
return '<unprintable %s object>' % type(value).__name__
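# Illustrative sketch: standalone use of ListTB as described in its docstring,
# formatting the active exception from a pre-extracted frame list:
#
#   handler = ListTB(color_scheme='NoColor')
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       etype, evalue, tb = sys.exc_info()
#       handler(etype, evalue, traceback.extract_tb(tb))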
#----------------------------------------------------------------------------
class VerboseTB(TBTools):
"""A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
of HTML. Requires inspect and pydoc. Crazy, man.
Modified version which optionally strips the topmost entries from the
traceback, to be used with alternate interpreters (because their own code
would appear in the traceback)."""
def __init__(self,color_scheme = 'Linux', call_pdb=False, ostream=None,
tb_offset=0, long_header=False, include_vars=True):
"""Specify traceback offset, headers and color scheme.
Define how many frames to drop from the tracebacks. Calling it with
tb_offset=1 allows use of this handler in interpreters which will have
their own code at the top of the traceback (VerboseTB will first
remove that frame before printing the traceback info)."""
TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
ostream=ostream)
self.tb_offset = tb_offset
self.long_header = long_header
self.include_vars = include_vars
def structured_traceback(self, etype, evalue, etb, tb_offset=None,
context=5):
"""Return a nice text document describing the traceback."""
tb_offset = self.tb_offset if tb_offset is None else tb_offset
# some locals
try:
etype = etype.__name__
except AttributeError:
pass
Colors = self.Colors # just a shorthand + quicker name lookup
ColorsNormal = Colors.Normal # used a lot
col_scheme = self.color_scheme_table.active_scheme_name
indent = ' '*INDENT_SIZE
em_normal = '%s\n%s%s' % (Colors.valEm, indent,ColorsNormal)
undefined = '%sundefined%s' % (Colors.em, ColorsNormal)
exc = '%s%s%s' % (Colors.excName,etype,ColorsNormal)
# some internal-use functions
def text_repr(value):
"""Hopefully pretty robust repr equivalent."""
# this is pretty horrible but should always return *something*
try:
return pydoc.text.repr(value)
except KeyboardInterrupt:
raise
except:
try:
return repr(value)
except KeyboardInterrupt:
raise
except:
try:
# all still in an except block so we catch
# getattr raising
name = getattr(value, '__name__', None)
if name:
# ick, recursion
return text_repr(name)
klass = getattr(value, '__class__', None)
if klass:
return '%s instance' % text_repr(klass)
except KeyboardInterrupt:
raise
except:
return 'UNRECOVERABLE REPR FAILURE'
def eqrepr(value, repr=text_repr): return '=%s' % repr(value)
def nullrepr(value, repr=text_repr): return ''
# meat of the code begins
try:
etype = etype.__name__
except AttributeError:
pass
if self.long_header:
# Header with the exception type, python version, and date
pyver = 'Python ' + string.split(sys.version)[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = '%s%s%s\n%s%s%s\n%s' % (Colors.topline, '-'*75, ColorsNormal,
exc, ' '*(75-len(str(etype))-len(pyver)),
pyver, date.rjust(75) )
head += "\nA problem occured executing Python code. Here is the sequence of function"\
"\ncalls leading up to the error, with the most recent (innermost) call last."
else:
# Simplified header
head = '%s%s%s\n%s%s' % (Colors.topline, '-'*75, ColorsNormal,exc,
'Traceback (most recent call last)'.\
rjust(75 - len(str(etype)) ) )
frames = []
# Flush cache before calling inspect. This helps alleviate some of the
# problems with python 2.3's inspect.py.
linecache.checkcache()
# Drop topmost frames if requested
try:
# Try the default getinnerframes and Alex's: Alex's fixes some
# problems, but it generates empty tracebacks for console errors
            # (5 blank lines) where none should be returned.
#records = inspect.getinnerframes(etb, context)[tb_offset:]
#print 'python records:', records # dbg
records = _fixed_getinnerframes(etb, context, tb_offset)
#print 'alex records:', records # dbg
except:
# FIXME: I've been getting many crash reports from python 2.3
# users, traceable to inspect.py. If I can find a small test-case
# to reproduce this, I should either write a better workaround or
# file a bug report against inspect (if that's the real problem).
# So far, I haven't been able to find an isolated example to
# reproduce the problem.
inspect_error()
traceback.print_exc(file=self.ostream)
info('\nUnfortunately, your original traceback can not be constructed.\n')
return ''
# build some color string templates outside these nested loops
tpl_link = '%s%%s%s' % (Colors.filenameEm,ColorsNormal)
tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm,
ColorsNormal)
tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \
(Colors.vName, Colors.valEm, ColorsNormal)
tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal)
tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal,
Colors.vName, ColorsNormal)
tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal)
tpl_line = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal)
tpl_line_em = '%s%%s%s %%s%s' % (Colors.linenoEm,Colors.line,
ColorsNormal)
# now, loop over all records printing context and info
abspath = os.path.abspath
for frame, file, lnum, func, lines, index in records:
#print '*** record:',file,lnum,func,lines,index # dbg
try:
file = file and abspath(file) or '?'
except OSError:
# if file is '<console>' or something not in the filesystem,
# the abspath call will throw an OSError. Just ignore it and
# keep the original file string.
pass
link = tpl_link % file
try:
args, varargs, varkw, locals = inspect.getargvalues(frame)
except:
# This can happen due to a bug in python2.3. We should be
# able to remove this try/except when 2.4 becomes a
# requirement. Bug details at http://python.org/sf/1005466
inspect_error()
traceback.print_exc(file=self.ostream)
info("\nIPython's exception reporting continues...\n")
if func == '?':
call = ''
else:
# Decide whether to include variable details or not
var_repr = self.include_vars and eqrepr or nullrepr
try:
call = tpl_call % (func,inspect.formatargvalues(args,
varargs, varkw,
locals,formatvalue=var_repr))
except KeyError:
# Very odd crash from inspect.formatargvalues(). The
# scenario under which it appeared was a call to
# view(array,scale) in NumTut.view.view(), where scale had
# been defined as a scalar (it should be a tuple). Somehow
# inspect messes up resolving the argument list of view()
# and barfs out. At some point I should dig into this one
# and file a bug report about it.
inspect_error()
traceback.print_exc(file=self.ostream)
info("\nIPython's exception reporting continues...\n")
call = tpl_call_fail % func
# Initialize a list of names on the current line, which the
# tokenizer below will populate.
names = []
def tokeneater(token_type, token, start, end, line):
"""Stateful tokeneater which builds dotted names.
The list of names it appends to (from the enclosing scope) can
contain repeated composite names. This is unavoidable, since
                there is no way to disambiguate partial dotted structures until
the full list is known. The caller is responsible for pruning
the final list of duplicates before using it."""
# build composite names
if token == '.':
try:
names[-1] += '.'
# store state so the next token is added for x.y.z names
tokeneater.name_cont = True
return
except IndexError:
pass
if token_type == tokenize.NAME and token not in keyword.kwlist:
if tokeneater.name_cont:
# Dotted names
names[-1] += token
tokeneater.name_cont = False
else:
# Regular new names. We append everything, the caller
# will be responsible for pruning the list later. It's
# very tricky to try to prune as we go, b/c composite
# names can fool us. The pruning at the end is easy
# to do (or the caller can print a list with repeated
                        # names if so desired).
names.append(token)
elif token_type == tokenize.NEWLINE:
raise IndexError
# we need to store a bit of state in the tokenizer to build
# dotted names
tokeneater.name_cont = False
def linereader(file=file, lnum=[lnum], getline=linecache.getline):
line = getline(file, lnum[0])
lnum[0] += 1
return line
# Build the list of names on this line of code where the exception
# occurred.
try:
# This builds the names list in-place by capturing it from the
# enclosing scope.
tokenize.tokenize(linereader, tokeneater)
except IndexError:
# signals exit of tokenizer
pass
except tokenize.TokenError,msg:
_m = ("An unexpected error occurred while tokenizing input\n"
"The following traceback may be corrupted or invalid\n"
"The error message is: %s\n" % msg)
error(_m)
# prune names list of duplicates, but keep the right order
unique_names = uniq_stable(names)
# Start loop over vars
lvals = []
if self.include_vars:
for name_full in unique_names:
name_base = name_full.split('.',1)[0]
if name_base in frame.f_code.co_varnames:
if locals.has_key(name_base):
try:
value = repr(eval(name_full,locals))
except:
value = undefined
else:
value = undefined
name = tpl_local_var % name_full
else:
if frame.f_globals.has_key(name_base):
try:
value = repr(eval(name_full,frame.f_globals))
except:
value = undefined
else:
value = undefined
name = tpl_global_var % name_full
lvals.append(tpl_name_val % (name,value))
if lvals:
lvals = '%s%s' % (indent,em_normal.join(lvals))
else:
lvals = ''
level = '%s %s\n' % (link,call)
if index is None:
frames.append(level)
else:
frames.append('%s%s' % (level,''.join(
_format_traceback_lines(lnum,index,lines,Colors,lvals,
col_scheme))))
# Get (safely) a string form of the exception info
try:
etype_str,evalue_str = map(str,(etype,evalue))
except:
# User exception is improperly defined.
etype,evalue = str,sys.exc_info()[:2]
etype_str,evalue_str = map(str,(etype,evalue))
# ... and format it
exception = ['%s%s%s: %s' % (Colors.excName, etype_str,
ColorsNormal, evalue_str)]
if type(evalue) is types.InstanceType:
try:
names = [w for w in dir(evalue) if isinstance(w, basestring)]
except:
                # Every now and then, an object with funny internals blows up
# when dir() is called on it. We do the best we can to report
# the problem and continue
_m = '%sException reporting error (object with broken dir())%s:'
exception.append(_m % (Colors.excName,ColorsNormal))
etype_str,evalue_str = map(str,sys.exc_info()[:2])
exception.append('%s%s%s: %s' % (Colors.excName,etype_str,
ColorsNormal, evalue_str))
names = []
for name in names:
value = text_repr(getattr(evalue, name))
exception.append('\n%s%s = %s' % (indent, name, value))
# vds: >>
if records:
filepath, lnum = records[-1][1:3]
#print "file:", str(file), "linenb", str(lnum) # dbg
filepath = os.path.abspath(filepath)
ipinst = ipapi.get()
if ipinst is not None:
ipinst.hooks.synchronize_with_editor(filepath, lnum, 0)
# vds: <<
# return all our info assembled as a single string
# return '%s\n\n%s\n%s' % (head,'\n'.join(frames),''.join(exception[0]) )
return [head] + frames + [''.join(exception[0])]
def debugger(self,force=False):
"""Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler."""
if force or self.call_pdb:
if self.pdb is None:
self.pdb = debugger.Pdb(
self.color_scheme_table.active_scheme_name)
# the system displayhook may have changed, restore the original
# for pdb
display_trap = DisplayTrap(hook=sys.__displayhook__)
with display_trap:
self.pdb.reset()
# Find the right frame so we don't pop up inside ipython itself
if hasattr(self,'tb') and self.tb is not None:
etb = self.tb
else:
etb = self.tb = sys.last_traceback
while self.tb is not None and self.tb.tb_next is not None:
self.tb = self.tb.tb_next
if etb and etb.tb_next:
etb = etb.tb_next
self.pdb.botframe = etb.tb_frame
self.pdb.interaction(self.tb.tb_frame, self.tb)
if hasattr(self,'tb'):
del self.tb
def handler(self, info=None):
(etype, evalue, etb) = info or sys.exc_info()
self.tb = etb
ostream = self.ostream
ostream.flush()
ostream.write(self.text(etype, evalue, etb))
ostream.write('\n')
ostream.flush()
# Changed so an instance can just be called as VerboseTB_inst() and print
# out the right info on its own.
def __call__(self, etype=None, evalue=None, etb=None):
"""This hook can replace sys.excepthook (for Python 2.1 or higher)."""
if etb is None:
self.handler()
else:
self.handler((etype, evalue, etb))
try:
self.debugger()
except KeyboardInterrupt:
print "\nKeyboardInterrupt"
#----------------------------------------------------------------------------
class FormattedTB(VerboseTB, ListTB):
"""Subclass ListTB but allow calling with a traceback.
It can thus be used as a sys.excepthook for Python > 2.1.
Also adds 'Context' and 'Verbose' modes, not available in ListTB.
Allows a tb_offset to be specified. This is useful for situations where
one needs to remove a number of topmost frames from the traceback (such as
occurs with python programs that themselves execute other python code,
like Python shells). """
def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
ostream=None,
tb_offset=0, long_header=False, include_vars=False):
# NEVER change the order of this list. Put new modes at the end:
self.valid_modes = ['Plain','Context','Verbose']
self.verbose_modes = self.valid_modes[1:3]
VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
ostream=ostream, tb_offset=tb_offset,
long_header=long_header, include_vars=include_vars)
# Different types of tracebacks are joined with different separators to
# form a single string. They are taken from this dict
self._join_chars = dict(Plain='', Context='\n', Verbose='\n')
# set_mode also sets the tb_join_char attribute
self.set_mode(mode)
def _extract_tb(self,tb):
if tb:
return traceback.extract_tb(tb)
else:
return None
def structured_traceback(self, etype, value, tb, tb_offset=None, context=5):
tb_offset = self.tb_offset if tb_offset is None else tb_offset
mode = self.mode
if mode in self.verbose_modes:
# Verbose modes need a full traceback
return VerboseTB.structured_traceback(
self, etype, value, tb, tb_offset, context
)
else:
# We must check the source cache because otherwise we can print
# out-of-date source code.
linecache.checkcache()
# Now we can extract and format the exception
elist = self._extract_tb(tb)
return ListTB.structured_traceback(
self, etype, value, elist, tb_offset, context
)
def stb2text(self, stb):
"""Convert a structured traceback (a list) to a string."""
return self.tb_join_char.join(stb)
def set_mode(self,mode=None):
"""Switch to the desired mode.
If mode is not specified, cycles through the available modes."""
if not mode:
new_idx = ( self.valid_modes.index(self.mode) + 1 ) % \
len(self.valid_modes)
self.mode = self.valid_modes[new_idx]
elif mode not in self.valid_modes:
raise ValueError, 'Unrecognized mode in FormattedTB: <'+mode+'>\n'\
'Valid modes: '+str(self.valid_modes)
else:
self.mode = mode
# include variable details only in 'Verbose' mode
self.include_vars = (self.mode == self.valid_modes[2])
# Set the join character for generating text tracebacks
self.tb_join_char = self._join_chars[mode]
    # some convenient shortcuts
def plain(self):
self.set_mode(self.valid_modes[0])
def context(self):
self.set_mode(self.valid_modes[1])
def verbose(self):
self.set_mode(self.valid_modes[2])
#----------------------------------------------------------------------------
class AutoFormattedTB(FormattedTB):
"""A traceback printer which can be called on the fly.
It will find out about exceptions by itself.
A brief example:
AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux')
try:
...
except:
AutoTB() # or AutoTB(out=logfile) where logfile is an open file object
"""
def __call__(self,etype=None,evalue=None,etb=None,
out=None,tb_offset=None):
"""Print out a formatted exception traceback.
Optional arguments:
- out: an open file-like object to direct output to.
- tb_offset: the number of frames to skip over in the stack, on a
per-call basis (this overrides temporarily the instance's tb_offset
        given at initialization time). """
if out is None:
out = self.ostream
out.flush()
out.write(self.text(etype, evalue, etb, tb_offset))
out.write('\n')
out.flush()
# FIXME: we should remove the auto pdb behavior from here and leave
# that to the clients.
try:
self.debugger()
except KeyboardInterrupt:
print "\nKeyboardInterrupt"
def structured_traceback(self, etype=None, value=None, tb=None,
tb_offset=None, context=5):
if etype is None:
etype,value,tb = sys.exc_info()
self.tb = tb
return FormattedTB.structured_traceback(
self, etype, value, tb, tb_offset, context)
#---------------------------------------------------------------------------
# A simple class to preserve Nathan's original functionality.
class ColorTB(FormattedTB):
"""Shorthand to initialize a FormattedTB in Linux colors mode."""
def __init__(self,color_scheme='Linux',call_pdb=0):
FormattedTB.__init__(self,color_scheme=color_scheme,
call_pdb=call_pdb)
class SyntaxTB(ListTB):
"""Extension which holds some state: the last exception value"""
def __init__(self,color_scheme = 'NoColor'):
ListTB.__init__(self,color_scheme)
self.last_syntax_error = None
def __call__(self, etype, value, elist):
self.last_syntax_error = value
ListTB.__call__(self,etype,value,elist)
def clear_err_state(self):
"""Return the current error state and clear it"""
e = self.last_syntax_error
self.last_syntax_error = None
return e
def stb2text(self, stb):
"""Convert a structured traceback (a list) to a string."""
return ''.join(stb)
#----------------------------------------------------------------------------
# module testing (minimal)
if __name__ == "__main__":
def spam(c, (d, e)):
x = c + d
y = c * d
foo(x, y)
def foo(a, b, bar=1):
eggs(a, b + bar)
def eggs(f, g, z=globals()):
h = f + g
i = f - g
return h / i
print ''
print '*** Before ***'
try:
print spam(1, (2, 3))
except:
traceback.print_exc()
print ''
handler = ColorTB()
print '*** ColorTB ***'
try:
print spam(1, (2, 3))
except:
apply(handler, sys.exc_info() )
print ''
handler = VerboseTB()
print '*** VerboseTB ***'
try:
print spam(1, (2, 3))
except:
apply(handler, sys.exc_info() )
print ''
|
py | b40aaba222f0b1b1bb3c1267820b7503a5fb44d8 | from ursina import *
# Main Menu Example, or it can be any kind of menu, like Inventory, Quest journal, etc.
# Created by Doctor
# 09 Feb 21
# Class of game menu
class MenuMenu(Entity):
def __init__(self, **kwargs):
super().__init__(parent=camera.ui, ignore_paused=True)
# Create empty entities that will be parents of our menus content
self.main_menu = Entity(parent=self, enabled=True)
self.options_menu = Entity(parent=self, enabled=False)
self.help_menu = Entity(parent=self, enabled=False)
        # Add a background. You can change 'shore' to a different texture if you'd like.
self.background = Sprite('shore', color=color.dark_gray, z=1)
# [MAIN MENU] WINDOW START
# Title of our menu
Text("MAIN MENU", parent=self.main_menu, y=0.4, x=0, origin=(0,0))
# Reference of our action function for quit button
def quit_game():
application.quit()
# Reference of our action function for options button
def options_menu_btn():
self.options_menu.enable()
self.main_menu.disable()
# Reference of our action function for help button
def help_menu_btn():
self.help_menu.enable()
self.main_menu.disable()
# Button list
ButtonList(button_dict={
"Start": Func(print_on_screen,"You clicked on Start button!", position=(0,.1), origin=(0,0)),
"Options": Func(options_menu_btn),
"Help": Func(help_menu_btn),
"Exit": Func(quit_game)
},y=0,parent=self.main_menu)
# [MAIN MENU] WINDOW END
# [OPTIONS MENU] WINDOW START
# Title of our menu
Text ("OPTIONS MENU", parent=self.options_menu, y=0.4, x=0, origin=(0, 0))
# Reference of our action function for back button
def options_back_btn_action():
self.main_menu.enable()
self.options_menu.disable()
# Button
Button("Back",parent=self.options_menu,y=-0.3,scale=(0.1,0.05),color=rgb(50,50,50),
on_click=options_back_btn_action)
# [OPTIONS MENU] WINDOW END
# [HELP MENU] WINDOW START
# Title of our menu
Text ("HELP MENU", parent=self.help_menu, y=0.4, x=0, origin=(0, 0))
# Reference of our action function for back button
def help_back_btn_action():
self.main_menu.enable()
self.help_menu.disable()
# Button list
ButtonList (button_dict={
"Gameplay": Func(print_on_screen,"You clicked on Gameplay help button!", position=(0,.1), origin=(0,0)),
"Battle": Func(print_on_screen,"You clicked on Battle help button!", position=(0,.1), origin=(0,0)),
"Control": Func(print_on_screen,"You clicked on Control help button!", position=(0,.1), origin=(0,0)),
"Back": Func (help_back_btn_action)
}, y=0, parent=self.help_menu)
# [HELP MENU] WINDOW END
        # Here we can change attributes of this class when instantiating it
for key, value in kwargs.items ():
setattr (self, key, value)
# Input function that check if key pressed on keyboard
def input(self, key):
        # And if you want to use the same keys in different windows,
        # like [Escape], [Enter] or the arrow keys,
        # just write it like this:
# If our main menu enabled and we press [Escape]
if self.main_menu.enabled:
if key == "escape":
# Close app
application.quit()
# If our options menu enabled and we press [Escape]
if self.options_menu.enabled:
if key == "escape":
# Close options window and show main menu
self.main_menu.enable()
self.options_menu.disable()
# If our help menu enabled and we press [Escape]
if self.help_menu.enabled:
if key == "escape":
# Close help window and show main menu
self.main_menu.enable()
self.help_menu.disable()
# Update function that check something every frame
# You can use it similar to input with checking
# what menu is currently enabled
def update(self):
pass
# Setup window title
window.title = "Main Menu Tutorial"
# Init application
app = Ursina()
# Call our menu
main_menu = MenuMenu()
# Run application
app.run()
|
py | b40aaba30762c1f83e62f370a1c85c5952332d3a | def binom(num, k):
if num <= 1:
return 1
if k == 0 or k == num:
return 1
return binom(num - 1, k) + binom(num - 1, k - 1)
print(binom(5, 3))
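# Added cross-check (not part of the original snippet): on Python 3.8+ the
# recursive result can be compared against the closed-form binomial
# coefficient from the standard library.
import math
assert binom(5, 3) == math.comb(5, 3) == 10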
|
py | b40aaba7951bae369a77a66a4133b4d35cabfb21 | #!/usr/bin/env python -OO
# -*- coding: utf-8 -*-
from __future__ import with_statement
from bs4 import BeautifulSoup
from glob import glob
import ttk
import os
import re
import sqlite3
import sys
import threading
import Tkinter as tk
class Parser(tk.Frame):
"""docstring for parser"""
def __init__(self, parent, *args, **kwargs):
self.text = tk.StringVar()
self.dir = "j-archive"
self.database = "jclues.db"
self.parsed = 0
self.total = len(os.listdir(self.dir))
"""GUI SETUP"""
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.label = tk.Label(self.parent,text = "Click to Start")
self.label.pack()
self.parseButton = ttk.Button(self.parent,text="Parse Files", command=self.main)
self.closeButton = ttk.Button(self.parent,text="Close", command=self.parent.quit)
self.progress = ttk.Progressbar(self.parent, orient="horizontal",
length=200, mode="determinate")
self.parseButton.pack(side = tk.TOP)
self.progress["value"] = 0
self.progress.pack(side=tk.TOP)
def main(self):
def realMain():
self.progress.start()
self.parse()
print "Parsing game files"
self.parseButton.pack_forget()
threading.Thread(target=realMain).start()
def start(self):
self.maxbytes = self.total
self.progress["maximum"] = self.total
self.parse()
# threading.Thread(target=realMain).start()
def parse(self):
"""Loop thru all the games and parse them."""
self.sql = sqlite3.connect(self.database)
if not os.path.isdir(self.dir):
print "j-archive folder not found"
sys.exit(1)
NUMBER_OF_FILES = len(os.listdir(self.dir))
print "Parsing", NUMBER_OF_FILES, "files"
#[game, airdate, round, category, value, clue, answer]
if not os.path.isfile(self.database):
self.sql.execute("""PRAGMA foreign_keys = ON;""")
self.sql.execute("""CREATE TABLE clues(
id INTEGER PRIMARY KEY AUTOINCREMENT,
game INTEGER,
airdate TEXT,
round INTEGER,
category TEXT,
value INTEGER,
clue TEXT,
answer TEXT
);""")
for i, file_name in enumerate(glob(os.path.join(self.dir, "*.html")), 1):
self.parsed +=1
self.progress["value"] = self.parsed/self.total
self.label.config(text = "Parsing page %s" %i)
with open(os.path.abspath(file_name)) as f:
self.parse_game(f, i)
if i%1000 ==0:
print i,"committed"
self.sql.commit()
self.sql.commit()
        self.progress.pack_forget()
self.label.config(text = "All Done")
self.closeButton.pack()
print "All done"
def parse_game(self, f, gid):
"""Parses an entire Jeopardy! game and extract individual clues."""
bsoup = BeautifulSoup(f, "lxml")
# The title is in the format: `J! Archive - Show #XXXX, aired 2004-09-16`,
# where the last part is all that is required
airdate = bsoup.title.get_text().split()[-1]
if not self.parse_round(bsoup, 1, gid, airdate) or not self.parse_round(bsoup, 2, gid, airdate):
# One of the rounds does not exist
pass
# The final Jeopardy! round
r = bsoup.find("table", class_="final_round")
if not r:
# This game does not have a final clue
return
category = r.find("td", class_="category_name")
if not category:
print "err"
return
category = category.get_text()
text = r.find("td", class_="clue_text").get_text()
answer = BeautifulSoup(r.find("div", onmouseover=True).get("onmouseover"), "lxml")
answer = answer.find("em").get_text()
# False indicates no preset value for a clue
self.insert([gid, airdate, 3, category, False, text, answer])
def parse_round(self,bsoup, rnd, gid, airdate):
"""Parses and inserts the list of clues from a whole round."""
round_id = "jeopardy_round" if rnd == 1 else "double_jeopardy_round"
r = bsoup.find(id=round_id)
# The game may not have all the rounds
if not r:
return False
# The list of categories for this round
categories = [c.get_text() for c in r.find_all("td", class_="category_name")]
# The x_coord determines which category a clue is in
# because the categories come before the clues, we will
# have to match them up with the clues later on.
x = 0
for a in r.find_all("td", class_="clue"):
is_missing = True if not a.get_text().strip() else False
if not is_missing:
value = a.find("td", class_=re.compile("clue_value")).get_text().lstrip("D: $")
text = a.find("td", class_="clue_text").get_text()
answer = BeautifulSoup(a.find("div", onmouseover=True).get("onmouseover"), "lxml")
answer = answer.find("em", class_="correct_response").get_text()
self.insert([gid, airdate, rnd, categories[x], value, text, answer])
# Always update x, even if we skip
# a clue, as this keeps things in order. there
# are 6 categories, so once we reach the end,
# loop back to the beginning category.
#
# Using modulus is slower, e.g.:
#
# x += 1
# x %= 6
#
x = 0 if x == 5 else x + 1
return True
def insert(self,clue):
"""Inserts the given clue into the database."""
# Clue is [game, airdate, round, category, value, clue, answer]
# Note that at this point, clue[4] is False if round is 3
#[game, airdate, round, category, value, clue, answer]
if "\\\'" in clue[6]:
clue[6] = clue[6].replace("\\\'", "'")
if "\\\"" in clue[6]:
clue[6] = clue[6].replace("\\\"", "\"")
if not self.sql:
print clue
return
self.sql.execute("INSERT INTO clues Values(null,?, ?, ?, ?, ?, ?, ?)",(clue[0],clue[1],clue[2],clue[3],clue[4],clue[5],clue[6], ))
def StartParser():
root = tk.Tk()
root.title("Parser")
root.geometry("400x100")
root.resizable(0,0)
Parser(root)
root.mainloop()
if __name__ == "__main__":
StartParser()
|
py | b40aabad55a845147a53e03faa771c40ba6d6694 | import struct
import io
from a2s.exceptions import BufferExhaustedError
class ByteReader():
def __init__(self, stream, endian="=", encoding=None):
self.stream = stream
self.endian = endian
self.encoding = encoding
def read(self, size=-1):
data = self.stream.read(size)
if size > -1 and len(data) != size:
raise BufferExhaustedError()
return data
def peek(self, size=-1):
cur_pos = self.stream.tell()
data = self.stream.read(size)
self.stream.seek(cur_pos, io.SEEK_SET)
return data
def unpack(self, fmt):
fmt = self.endian + fmt
fmt_size = struct.calcsize(fmt)
return struct.unpack(fmt, self.read(fmt_size))
def unpack_one(self, fmt):
values = self.unpack(fmt)
assert len(values) == 1
return values[0]
def read_int8(self):
return self.unpack_one("b")
def read_uint8(self):
return self.unpack_one("B")
def read_int16(self):
return self.unpack_one("h")
def read_uint16(self):
return self.unpack_one("H")
def read_int32(self):
return self.unpack_one("l")
def read_uint32(self):
return self.unpack_one("L")
def read_int64(self):
return self.unpack_one("q")
def read_uint64(self):
return self.unpack_one("Q")
def read_float(self):
return self.unpack_one("f")
def read_double(self):
return self.unpack_one("d")
def read_bool(self):
return bool(self.unpack_one("b"))
def read_char(self):
char = self.unpack_one("c")
if self.encoding is not None:
return char.decode(self.encoding, errors="replace")
else:
return char
def read_cstring(self, charsize=1):
string = b""
while True:
c = self.read(charsize)
if int.from_bytes(c, "little") == 0:
break
else:
string += c
if self.encoding is not None:
return string.decode(self.encoding, errors="replace")
else:
return string
class ByteWriter():
def __init__(self, stream, endian="=", encoding=None):
self.stream = stream
self.endian = endian
self.encoding = encoding
def write(self, *args):
return self.stream.write(*args)
def pack(self, fmt, *values):
fmt = self.endian + fmt
fmt_size = struct.calcsize(fmt)
return self.stream.write(struct.pack(fmt, *values))
def write_int8(self, val):
self.pack("b", val)
def write_uint8(self, val):
self.pack("B", val)
def write_int16(self, val):
self.pack("h", val)
def write_uint16(self, val):
self.pack("H", val)
def write_int32(self, val):
self.pack("l", val)
def write_uint32(self, val):
self.pack("L", val)
def write_int64(self, val):
self.pack("q", val)
def write_uint64(self, val):
self.pack("Q", val)
def write_float(self, val):
self.pack("f", val)
def write_double(self, val):
self.pack("d", val)
def write_bool(self, val):
self.pack("b", val)
def write_char(self, val):
if self.encoding is not None:
self.pack("c", val.encode(self.encoding))
else:
self.pack("c", val)
def write_cstring(self, val):
if self.encoding is not None:
self.write(val.encode(self.encoding) + b"\x00")
else:
self.write(val + b"\x00")
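# Illustrative round-trip sketch (added for clarity, not part of the original
# module): ByteWriter packs values into an in-memory buffer and ByteReader
# reads them back using the same endianness and encoding.
if __name__ == "__main__":
    buf = io.BytesIO()
    writer = ByteWriter(buf, endian="<", encoding="utf-8")
    writer.write_uint16(0x1234)
    writer.write_cstring("hello")
    buf.seek(0)
    reader = ByteReader(buf, endian="<", encoding="utf-8")
    assert reader.read_uint16() == 0x1234
    assert reader.read_cstring() == "hello"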
|
py | b40aabed37dbbd671f0a1f0c67743328f8aa82d2 | """Example that iterates through a servo on every channel, sets each to 180 and then back to 0."""
import time
from adafruit_servokit import ServoKit
# Set channels to the number of servo channels on your kit.
# 8 for FeatherWing, 16 for Shield/HAT/Bonnet.
kit = ServoKit(channels=8)
for i in range(len(kit.servo)):
kit.servo[i].angle = 180
time.sleep(1)
kit.servo[i].angle = 0
time.sleep(1)
|
py | b40aace64ddfcf9b33f563460f85f8c7b5c5db04 | ''' Sample
This script loads a pretrained net and a weights file and samples from it. '''
import functools
import math
import numpy as np
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import torchvision
# Import my stuff
import inception_utils
import utils
import losses
def run(config):
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0,
'best_IS': 0, 'best_FID': 999999, 'config': config}
# Optionally, get the configuration from the state dict. This allows for
# recovery of the config provided only a state dict and experiment name,
# and can be convenient for writing less verbose sample shell scripts.
if config['config_from_name']:
utils.load_weights(None, None, state_dict, config['weights_root'],
config['experiment_name'], config['load_weights'], None,
strict=False, load_optim=False)
# Ignore items which we might want to overwrite from the command line
for item in state_dict['config']:
if item not in ['z_var', 'base_root', 'batch_size', 'G_batch_size', 'use_ema', 'G_eval_mode']:
config[item] = state_dict['config'][item]
# update config (see train.py for explanation)
config['resolution'] = utils.imsize_dict[config['dataset']]
config['n_classes'] = utils.nclass_dict[config['dataset']]
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
config = utils.update_config_roots(config)
config['skip_init'] = True
config['no_optim'] = True
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
# Import the model--this line allows us to dynamically select different files.
model = __import__(config['model'])
experiment_name = (config['experiment_name'] if config['experiment_name']
else utils.name_from_config(config))
print('Experiment name is %s' % experiment_name)
G = model.Generator(**config).cuda()
utils.count_parameters(G)
# Load weights
print('Loading weights...')
# Here is where we deal with the ema--load ema weights or load normal weights
utils.load_weights(G if not (config['use_ema']) else None, None, state_dict,
config['weights_root'], experiment_name, config['load_weights'],
G if config['ema'] and config['use_ema'] else None,
strict=False, load_optim=False)
# Update batch size setting used for G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'],
z_var=config['z_var'])
if config['G_eval_mode']:
print('Putting G in eval mode..')
G.eval()
else:
print('G is in %s mode...' % ('training' if G.training else 'eval'))
#Sample function
sample = functools.partial(utils.sample, G=G, z_=z_, y_=y_, config=config)
if config['accumulate_stats']:
print('Accumulating standing stats across %d accumulations...' % config['num_standing_accumulations'])
utils.accumulate_standing_stats(G, z_, y_, config['n_classes'],
config['num_standing_accumulations'])
# Sample a number of images and save them to an NPZ, for use with TF-Inception
if config['sample_npz']:
# Lists to hold images and labels for images
x, y = [], []
print('Sampling %d images and saving them to npz...' % config['sample_num_npz'])
for i in trange(int(np.ceil(config['sample_num_npz'] / float(G_batch_size)))):
with torch.no_grad():
images, labels = sample()
x += [np.uint8(255 * (images.cpu().numpy() + 1) / 2.)]
y += [labels.cpu().numpy()]
x = np.concatenate(x, 0)[:config['sample_num_npz']]
y = np.concatenate(y, 0)[:config['sample_num_npz']]
print('Images shape: %s, Labels shape: %s' % (x.shape, y.shape))
npz_filename = '%s/%s/samples_all.npz' % (config['samples_root'], experiment_name)
print('Saving npz to %s...' % npz_filename)
np.savez(npz_filename, **{'x' : x, 'y' : y})
# Prepare sample sheets
if config['sample_sheets']:
print('Preparing conditional sample sheets...')
utils.sample_sheet(G, classes_per_sheet=utils.classes_per_sheet_dict[config['dataset']],
num_classes=config['n_classes'],
samples_per_class=10, parallel=config['parallel'],
samples_root=config['samples_root'],
experiment_name=experiment_name,
folder_number=config['sample_sheet_folder_num'],
z_=z_,)
# Sample interp sheets
if config['sample_interps']:
print('Preparing interp sheets...')
for fix_z, fix_y in zip([False, False, True], [False, True, False]):
utils.interp_sheet(G, num_per_sheet=16, num_midpoints=8,
num_classes=config['n_classes'],
parallel=config['parallel'],
samples_root=config['samples_root'],
experiment_name=experiment_name,
folder_number=config['sample_sheet_folder_num'],
sheet_number=0,
fix_z=fix_z, fix_y=fix_y, device='cuda')
# Sample random sheet
if config['sample_random']:
print('Preparing random sample sheet...')
images, labels = sample()
torchvision.utils.save_image(images.float(),
'%s/%s/random_samples.jpg' % (config['samples_root'], experiment_name),
nrow=int(G_batch_size**0.5),
normalize=True)
# Get Inception Score and FID
get_inception_metrics = inception_utils.prepare_inception_metrics(config['dataset'], config['parallel'], config['no_fid'])
# Prepare a simple function get metrics that we use for trunc curves
def get_metrics():
sample = functools.partial(utils.sample, G=G, z_=z_, y_=y_, config=config)
IS_mean, IS_std, FID = get_inception_metrics(sample, config['num_inception_images'], num_splits=10, prints=False)
# Prepare output string
outstring = 'Using %s weights ' % ('ema' if config['use_ema'] else 'non-ema')
outstring += 'in %s mode, ' % ('eval' if config['G_eval_mode'] else 'training')
outstring += 'with noise variance %3.3f, ' % z_.var
outstring += 'over %d images, ' % config['num_inception_images']
if config['accumulate_stats'] or not config['G_eval_mode']:
outstring += 'with batch size %d, ' % G_batch_size
if config['accumulate_stats']:
outstring += 'using %d standing stat accumulations, ' % config['num_standing_accumulations']
outstring += 'Itr %d: PYTORCH UNOFFICIAL Inception Score is %3.3f +/- %3.3f, PYTORCH UNOFFICIAL FID is %5.4f' % (state_dict['itr'], IS_mean, IS_std, FID)
print(outstring)
if config['sample_inception_metrics']:
print('Calculating Inception metrics...')
get_metrics()
# Sample truncation curve stuff. This is basically the same as the inception metrics code
if config['sample_trunc_curves']:
start, step, end = [float(item) for item in config['sample_trunc_curves'].split('_')]
print('Getting truncation values for variance in range (%3.3f:%3.3f:%3.3f)...' % (start, step, end))
for var in np.arange(start, end + step, step):
z_.var = var
# Optionally comment this out if you want to run with standing stats
# accumulated at one z variance setting
if config['accumulate_stats']:
utils.accumulate_standing_stats(G, z_, y_, config['n_classes'],
config['num_standing_accumulations'])
get_metrics()
def main():
# parse command line and run
parser = utils.prepare_parser()
parser = utils.add_sample_parser(parser)
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main() |
py | b40aad26fdc784cc5dfaf249f1c167e4160e4887 | # Exercises on loops and the conditional operator ------
#----------------------------------------
'''
Task 1
Use a loop to print five lines of zeros, numbering each line.
'''
for i in range(1, 6):
print(i, '0000000000000000000000000000000000000000000')
'''
Task 2
The user enters 10 numbers in a loop. Count how many fives the user entered.
'''
count = 0
for i in range(10):
    user_data = int(input('Enter a number: '))
if user_data == 5:
count += 1
print(count)
'''
Task 3
Find the sum of the numbers from 1 to 100 and print the result.
'''
sum = 0
for i in range(1, 101):
sum += i
print(sum)
'''
Task 4
Find the product of the numbers from 1 to 10 and print the result.
'''
proiz = 1
for i in range(2, 11):
proiz *= i
print(proiz)
'''
Task 5
Print each digit of a number on its own line.
'''
integer_number = 123456
start_del = len(str(integer_number)) - 1
delitel = 10 ** start_del
#print(integer_number % delitel, integer_number // delitel)
while integer_number > 0:
print(int(integer_number // delitel))
integer_number = integer_number % delitel
delitel /= 10
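# Alternative sketch (added for comparison): the same digits can be printed by
# iterating over the number's string form, e.g.:
# for digit in str(123456):
#     print(digit)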
'''
Task 6
Find the sum of the digits of a number.
'''
integer_number = 123456
sum = 0
while integer_number > 0:
sum += integer_number % 10
integer_number = integer_number // 10
print(sum)
'''
Task 7
Find the product of the digits of a number.
'''
integer_number = 123456
proiz = 1
while integer_number > 0:
proiz *= integer_number % 10
integer_number = integer_number // 10
print(proiz)
'''
Task 8
Answer the question: does the number contain the digit 5?
'''
integer_number = 125254
while integer_number > 0:
if integer_number % 10 == 5:
print('Yes')
break
integer_number = integer_number // 10
else:
print('No')
'''
Task 9
Find the largest digit in a number.
'''
integer_number = 125278954
max_num = integer_number % 10
while integer_number > 0:
max_num = max(max_num, integer_number % 10)
integer_number = integer_number // 10
print(max_num)
'''
Task 10
Count how many times the digit 5 occurs in a number.
'''
integer_number = 125278954
count_num = 0
while integer_number > 0:
if integer_number % 10 == 5:
count_num += 1
integer_number = integer_number // 10
print(count_num)
|
py | b40aaf1d8e86d99b3249a0a0f6922467b8de16f0 | from os import path
import setuptools
from setuptools.config import read_configuration
BASE_PATH = path.dirname(__file__)
CFG_PATH = path.join(BASE_PATH, "setup.cfg")
config = read_configuration(CFG_PATH)
version = config["metadata"]["version"]
setuptools.setup(
name="dbnd-spark",
package_dir={"": "src"},
install_requires=["dbnd==" + version],
extras_require=dict(tests=["pyspark==2.4.4"]),
entry_points={"dbnd": ["dbnd-spark = dbnd_spark._plugin"]},
)
|
py | b40ab163d29eb1e35f338be8de6edd7d0a4e8b75 | #!/usr/bin/env python3
import datetime
from pathlib import Path
from shutil import which
from socket import gethostbyname
from subprocess import DEVNULL, TimeoutExpired, Popen
import sys
import pytest
import trustme
from aiospamc.header_values import ContentLengthValue
from aiospamc.requests import Request
def pytest_addoption(parser):
parser.addoption("--spamd-process-timeout", action="store", default=10, type=int)
@pytest.fixture
def x_headers():
from aiospamc.header_values import GenericHeaderValue
return {"A": GenericHeaderValue(value="a"), "B": GenericHeaderValue(value="b")}
@pytest.fixture
def spam():
"""Example spam message using SpamAssassin's GTUBE message."""
return (
b"Subject: Test spam mail (GTUBE)\n"
b"Message-ID: <[email protected]>\n"
b"Date: Wed, 23 Jul 2003 23:30:00 +0200\n"
b"From: Sender <[email protected]>\n"
b"To: Recipient <[email protected]>\n"
b"Precedence: junk\n"
b"MIME-Version: 1.0\n"
b"Content-Type: text/plain; charset=us-ascii\n"
b"Content-Transfer-Encoding: 7bit\n\n"
b"This is the GTUBE, the\n"
b"\tGeneric\n"
b"\tTest for\n"
b"\tUnsolicited\n"
b"\tBulk\n"
b"\tEmail\n\n"
b"If your spam filter supports it, the GTUBE provides a test by which you\n"
b"can verify that the filter is installed correctly and is detecting incoming\n"
b"spam. You can send yourself a test mail containing the following string of\n"
b"characters (in upper case and with no white spaces and line breaks):\n\n"
b"XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X\n\n"
b"You should send this test mail from an account outside of your network.\n\n"
)
@pytest.fixture
def request_with_body():
body = b"Test body\n"
return Request(
verb="CHECK",
version="1.5",
headers={"Content-length": ContentLengthValue(len(body))},
body=body,
)
@pytest.fixture
def request_ping():
"""PING request."""
return Request(verb="PING")
@pytest.fixture
def response_empty():
"""Empty response."""
return b""
@pytest.fixture
def response_ok():
"""OK response in bytes."""
return b"SPAMD/1.5 0 EX_OK\r\n\r\n"
@pytest.fixture
def response_pong():
"""PONG response in bytes."""
return b"SPAMD/1.5 0 PONG\r\n"
@pytest.fixture
def response_tell():
"""Examplte TELL response."""
return b"SPAMD/1.1 0 EX_OK\r\n\r\n\r\n"
@pytest.fixture
def response_spam_header():
"""Response with Spam header in bytes."""
return b"SPAMD/1.1 0 EX_OK\r\nSpam: True ; 1000.0 / 1.0\r\n\r\n"
@pytest.fixture
def response_with_body():
"""Response with body and Content-length header in bytes."""
return b"SPAMD/1.5 0 EX_OK\r\nContent-length: 10\r\n\r\nTest body\n"
@pytest.fixture
def response_empty_body():
"""Response with Content-length header, but empty body in bytes."""
return b"SPAMD/1.5 0 EX_OK\r\nContent-length: 0\r\n\r\n"
@pytest.fixture
def response_timeout():
"""Server timeout response."""
return b"SPAMD/1.0 79 Timeout: (30 second timeout while trying to CHECK)\r\n"
@pytest.fixture
def response_invalid():
"""Invalid response in bytes."""
return b"Invalid response"
# Response exceptions
@pytest.fixture
def ex_usage():
"""Command line usage error."""
return b"SPAMD/1.5 64 EX_USAGE\r\n\r\n"
@pytest.fixture
def ex_data_err():
"""Data format error."""
return b"SPAMD/1.5 65 EX_DATAERR\r\n\r\n"
@pytest.fixture
def ex_no_input():
"""No input response in bytes."""
return b"SPAMD/1.5 66 EX_NOINPUT\r\n\r\n"
@pytest.fixture
def ex_no_user():
"""No user response in bytes."""
return b"SPAMD/1.5 67 EX_NOUSER\r\n\r\n"
@pytest.fixture
def ex_no_host():
"""No host response in bytes."""
return b"SPAMD/1.5 68 EX_NOHOST\r\n\r\n"
@pytest.fixture
def ex_unavailable():
"""Unavailable response in bytes."""
return b"SPAMD/1.5 69 EX_UNAVAILABLE\r\n\r\n"
@pytest.fixture
def ex_software():
"""Software exception response in bytes."""
return b"SPAMD/1.5 70 EX_SOFTWARE\r\n\r\n"
@pytest.fixture
def ex_os_err():
"""Operating system error response in bytes."""
return b"SPAMD/1.5 71 EX_OSERR\r\n\r\n"
@pytest.fixture
def ex_os_file():
"""Operating system file error in bytes."""
return b"SPAMD/1.5 72 EX_OSFILE\r\n\r\n"
@pytest.fixture
def ex_cant_create():
"""Can't create response error in bytes."""
return b"SPAMD/1.5 73 EX_CANTCREAT\r\n\r\n"
@pytest.fixture
def ex_io_err():
"""Input/output error response in bytes."""
return b"SPAMD/1.5 74 EX_IOERR\r\n\r\n"
@pytest.fixture
def ex_temp_fail():
"""Temporary failure error response in bytes."""
return b"SPAMD/1.5 75 EX_TEMPFAIL\r\n\r\n"
@pytest.fixture
def ex_protocol():
"""Protocol error response in bytes."""
return b"SPAMD/1.5 76 EX_PROTOCOL\r\n\r\n"
@pytest.fixture
def ex_no_perm():
"""No permission error response in bytes."""
return b"SPAMD/1.5 77 EX_NOPERM\r\n\r\n"
@pytest.fixture
def ex_config():
"""Configuration error response in bytes."""
return b"SPAMD/1.5 78 EX_CONFIG\r\n\r\n"
@pytest.fixture
def ex_timeout():
"""Timeout error response in bytes."""
return b"SPAMD/1.5 79 EX_TIMEOUT\r\n\r\n"
@pytest.fixture
def ex_undefined():
"""Undefined exception in bytes."""
return b"SPAMD/1.5 999 EX_UNDEFINED\r\n\r\n"
@pytest.fixture(scope="session")
def hostname():
return "localhost"
@pytest.fixture(scope="session")
def ip_address(hostname):
return gethostbyname(hostname)
@pytest.fixture(scope="session")
def tcp_port():
return 1783
@pytest.fixture(scope="session")
def ssl_port():
return 11783
@pytest.fixture(scope="session")
def unix_socket(tmp_path_factory):
return str(tmp_path_factory.mktemp("sockets") / "spamd.sock")
# Integration fixtures
@pytest.fixture(scope="session")
def create_certificate(tmp_path_factory, hostname, ip_address):
certs_tmp_path = tmp_path_factory.mktemp("localhost_certs")
ca_path = certs_tmp_path / "ca.pem"
cert_path = certs_tmp_path / "cert.pem"
ca = trustme.CA()
cert = ca.issue_cert(hostname, ip_address)
ca.cert_pem.write_to_path(ca_path)
cert.private_key_and_cert_chain_pem.write_to_path(cert_path)
yield ca_path, cert_path
@pytest.fixture(scope="session")
def certificate_authority(create_certificate):
yield create_certificate[0]
@pytest.fixture(scope="session")
def certificate(create_certificate):
yield create_certificate[1]
@pytest.fixture(scope="session")
def spamd(
tmp_path_factory, ip_address, tcp_port, ssl_port, unix_socket, certificate, request
):
# Setup log file
spamd_path = str(Path(which("spamd")).parent)
log_file = tmp_path_factory.mktemp("spamd") / "spamd.log"
# Configure options
options = [
f"--syslog={str(log_file)}",
"--local",
"--allow-tell",
f"--listen={ip_address}:{tcp_port}",
f"--listen=ssl:{ip_address}:{ssl_port}",
"--server-key",
f"{certificate}",
"--server-cert",
f"{certificate}",
]
if sys.platform != "win32":
options += [f"--socketpath={unix_socket}"]
# Spawn spamd
process = Popen(
[which("spamd"), *options],
cwd=spamd_path,
stdout=DEVNULL,
stderr=DEVNULL,
universal_newlines=True,
)
# Check the log to see if spamd is running
timeout = datetime.datetime.utcnow() + datetime.timedelta(
seconds=request.config.getoption("--spamd-process-timeout")
)
while not log_file.exists():
if datetime.datetime.utcnow() > timeout:
raise TimeoutError
running = False
spamd_start = "info: spamd: server started on"
with open(str(log_file), "r") as log:
while not running:
if datetime.datetime.utcnow() > timeout:
raise TimeoutError
log.seek(0)
for line in log:
if spamd_start in line:
running = True
break
if not running:
raise ChildProcessError
yield
# Stop spamd
process.terminate()
try:
process.wait(timeout=5)
except TimeoutExpired:
process.kill()
process.wait(timeout=5)
|
py | b40ab21a42d3ae56d9925d11985b27167dfb109e | from contextlib import ExitStack
from unittest.mock import Mock, call, patch
import pytest
from evernote.api.client import EvernoteClient
import synctogit.evernote.auth
from synctogit.evernote.auth import InteractiveAuth
@pytest.fixture
def mock_prompt_toolkit():
with ExitStack() as st:
shortcuts = ["button_dialog", "input_dialog", "yes_no_dialog"]
mocked = {}
mocked.update(
{
m: st.enter_context(patch.object(synctogit.evernote.auth, m))
for m in shortcuts
}
)
# Not really from the prompt_toolkit, but it is just convenient
# to mock it there as well.
mocked["wait_for_enter"] = st.enter_context(
patch.object(synctogit.evernote.auth, "wait_for_enter")
)
yield mocked
@pytest.fixture
def auth_params():
return dict(
consumer_key="c88l-gal",
consumer_secret="p0sswaD",
callback_url="https://localhost:63543/non-existing-url",
sandbox=True,
)
@pytest.fixture
def mock_evernote_client():
evernote_client = Mock(spec=EvernoteClient)
with patch.object(
synctogit.evernote.auth.InteractiveAuth,
"_evernote_client",
return_value=evernote_client,
):
yield evernote_client
def test_flow_bundled_oauth(mock_evernote_client, mock_prompt_toolkit, auth_params):
m = mock_prompt_toolkit
# Continue? -- True
# Method? -- oauth
# Bundled? -- True
# Redirection url? -- https://localhost...
m["yes_no_dialog"].side_effect = [True, True]
m["button_dialog"].side_effect = ["oauth"]
m["input_dialog"].side_effect = [
"https://localhost:63543/non-existing-url?oauth_token=AA&oauth_verifier=BB",
]
mock_evernote_client.get_request_token.side_effect = [
{
"oauth_token_secret": "WHOAH",
"oauth_token": "AAAAAA.OOOOOOO.UUUUUUUU",
"oauth_callback_confirmed": "true",
}
]
mock_evernote_client.get_authorize_url.side_effect = [
"https://www.evernote.com/OAuth.action?oauth_token=AAAAAA.OOOOOOO.UUUUUUUU",
]
mock_evernote_client.get_access_token.side_effect = ["YOU.WON.THIS.TOKEN"]
auth = InteractiveAuth(**auth_params)
assert auth.run() == "YOU.WON.THIS.TOKEN"
assert mock_evernote_client.get_access_token.call_args == call(
"AAAAAA.OOOOOOO.UUUUUUUU", "WHOAH", "BB"
)
assert 1 == m["wait_for_enter"].call_count
# TODO:
# - oauth custom app flow
# - devtoken flow
# - aborts
# - token validation
|
py | b40ab279a38d1328eb8140c419ce7d27b2f42183 | #-*-coding:utf-8-*-
# Import libraries
import pyaudio
import wave
import sys
import numpy as np
import time
# Define stream chunk parameters
CHUNK = 1024
CHANNELS = 2
RATE = 8000 #44100
RECORD_SECONDS = 5
def playaudio(filename='temp.wav'):
wf = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
    # Open the stream
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
    # Read data
data = wf.readframes(CHUNK)
    # Play it back
    while data:
stream.write(data)
data = wf.readframes(CHUNK)
    # Stop the stream
stream.stop_stream()
stream.close()
    # Terminate PyAudio
p.terminate()
def getvoice():
FORMAT = pyaudio.paInt16
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
def waitRecord(threshold, timer):
FORMAT = pyaudio.paInt16
WAVE_OUTPUT_FILENAME = "temp1.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("开始监听")
temp = 0
frames = []
time1=time.time()
Flag = False
while True:
        # print('waiting to record', temp)
data = stream.read(CHUNK)
frames.append(data)
audio_data = np.fromstring(data, dtype=np.short)
temp = np.max(audio_data)
while temp > threshold :
Flag = True
            # print('recording, current level:', temp)
data = stream.read(CHUNK)
frames.append(data)
audio_data = np.fromstring(data, dtype=np.short)
temp = np.max(audio_data)
time1 = time.time()
if Flag:
if time.time()-time1>timer:
break
else:
frames.pop()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
    print("Recording finished")
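# Minimal usage sketch (added for illustration; the threshold and silence timer
# below are assumptions): start recording once the input level exceeds the
# threshold, stop after ~2 seconds of silence, then play the capture back.
if __name__ == '__main__':
    waitRecord(2000, 2)
    playaudio('temp1.wav')  # waitRecord() writes its capture to temp1.wav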
|
py | b40ab2c55da09fc8293c30d8c82ac9ddd2afd531 | import flowio
import numpy
f = flowio.FlowData('fcs_files/data1.fcs')
n = numpy.reshape(f.events, (-1, f.channel_count))
print(n.shape)
|
py | b40ab40f8acf830ef492726cc0a70819ba7d854d | #! /usr/bin/env python3
"""Agent that executes random actions"""
# import gym
import argparse
import numpy as np
from mujoco import ObjType
from environment.arm2pos import Arm2PosEnv
from environment.base import distance_between
from mouse_control import run_tests
saved_pos = None
def run():
env = Arm2PosEnv(continuous=False, max_steps=9999999, neg_reward=True, action_multiplier=.1)
total_r = 0
while True:
keypress = env.sim.get_last_key_press()
if keypress == ' ':
print(env._gripper_pos())
try:
action = int(keypress)
assert env.action_space.contains(action)
except (KeyError, TypeError, AssertionError, ValueError):
action = 0
assert isinstance(action, int)
obs, r, done, _ = env.step(action)
run_tests(env, obs)
total_r += r
if done:
print(total_r)
total_r = 0
env.reset()
print('\nresetting')
env.render(labels={'x': env._goal()[0]})
assert not env._currently_failed()
def assert_equal(val1, val2, atol=1e-5):
for a, b in zip(val1, val2):
assert np.allclose(a, b, atol=atol), "{} vs. {}".format(a, b)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=None)
args = parser.parse_args()
run()
|
py | b40ab434927bcf4d4ec0f7a6385a89c8723dbef8 | #
# PySNMP MIB module H3C-E1T1VI-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-E1T1VI-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:22:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
h3cCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "h3cCommon")
ifDescr, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifDescr", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Unsigned32, Bits, Gauge32, NotificationType, TimeTicks, ModuleIdentity, MibIdentifier, Integer32, IpAddress, iso, ObjectIdentity, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Unsigned32", "Bits", "Gauge32", "NotificationType", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Integer32", "IpAddress", "iso", "ObjectIdentity", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
h3cE1T1VI = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76))
h3cE1T1VI.setRevisions(('2010-04-08 18:55', '2009-06-08 17:41', '2007-04-05 15:42',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: h3cE1T1VI.setRevisionsDescriptions(('To fix bugs in the MIB file.', 'To fix bugs in the MIB file.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: h3cE1T1VI.setLastUpdated('201004081855Z')
if mibBuilder.loadTexts: h3cE1T1VI.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
if mibBuilder.loadTexts: h3cE1T1VI.setContactInfo('Platform Team H3C Technologies Co., Ltd. Hai-Dian District Beijing P.R. China http://www.h3c.com Zip: 100085')
if mibBuilder.loadTexts: h3cE1T1VI.setDescription('This MIB provides E1/T1 voice interface information that are excluded by RFC 1213 and RFC 2233')
h3cE1T1VITable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 1), )
if mibBuilder.loadTexts: h3cE1T1VITable.setStatus('current')
if mibBuilder.loadTexts: h3cE1T1VITable.setDescription('This table contains E1/T1 voice interface information, such as the total number of using time slot.')
h3cE1T1VIEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cE1T1VIEntry.setStatus('current')
if mibBuilder.loadTexts: h3cE1T1VIEntry.setDescription('This entry contains E1/T1 voice interface information. The index of this Entry is ifIndex defined in ifTable of RFC1213-MIB.')
h3cE1T1VIUsingTimeslots = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cE1T1VIUsingTimeslots.setStatus('current')
if mibBuilder.loadTexts: h3cE1T1VIUsingTimeslots.setDescription('The total number of using time slots on this interface.')
h3cE1T1VIUsingTimeslotsRatio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cE1T1VIUsingTimeslotsRatio.setStatus('current')
if mibBuilder.loadTexts: h3cE1T1VIUsingTimeslotsRatio.setDescription('The total ratio of using time slots on this interface.')
h3cE1T1VINotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 2))
h3cE1T1VITrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 2, 0))
h3cE1T1VITrapTimeSlot = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 2, 0, 1)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: h3cE1T1VITrapTimeSlot.setStatus('current')
if mibBuilder.loadTexts: h3cE1T1VITrapTimeSlot.setDescription('This trap is sent to the manager under the following condidion: All the available time slots of a E1/T1 voice interface has been in use.')
h3cE1T1VIGeneral = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 3))
h3cE1T1VITrapTimeSlotEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 76, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cE1T1VITrapTimeSlotEnable.setStatus('current')
if mibBuilder.loadTexts: h3cE1T1VITrapTimeSlotEnable.setDescription('This attribute controls whether the h3cE1T1VITrapTimeSlot trap will be sent or not.')
mibBuilder.exportSymbols("H3C-E1T1VI-MIB", h3cE1T1VIUsingTimeslots=h3cE1T1VIUsingTimeslots, h3cE1T1VITable=h3cE1T1VITable, PYSNMP_MODULE_ID=h3cE1T1VI, h3cE1T1VI=h3cE1T1VI, h3cE1T1VIEntry=h3cE1T1VIEntry, h3cE1T1VIUsingTimeslotsRatio=h3cE1T1VIUsingTimeslotsRatio, h3cE1T1VITrapTimeSlot=h3cE1T1VITrapTimeSlot, h3cE1T1VITrapTimeSlotEnable=h3cE1T1VITrapTimeSlotEnable, h3cE1T1VINotifications=h3cE1T1VINotifications, h3cE1T1VIGeneral=h3cE1T1VIGeneral, h3cE1T1VITrapPrefix=h3cE1T1VITrapPrefix)
|
py | b40ab470b334d0581b56faeb0b02f827a66321c9 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.graph.graph import Graph
from mo.ops.op import Op
from mo.front.common.partial_infer.random_uniform import tf_random_uniform_infer
class RandomUniform(Op):
op = 'RandomUniform'
enabled = True
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'type': __class__.op,
'op': __class__.op,
'infer': tf_random_uniform_infer,
'in_ports_count': 1,
'out_ports_count': 1,
}, attrs)
def supported_attrs(self):
return [
'T',
'dtype',
'seed',
'seed2',
'minval',
'maxval'
]
def backend_attrs(self):
return [
'T',
'dtype',
'seed',
'seed2',
'minval',
'maxval'
]
|
py | b40ab47e8a309340f2873afc9c55f722eb630d23 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from concurrent import futures
import grpc
import polymetis_pb2
import polymetis_pb2_grpc
from .third_party.robotiq_2finger_grippers.robotiq_2f_gripper import (
Robotiq2FingerGripper,
)
class RobotiqGripperServer(polymetis_pb2_grpc.GripperServerServicer):
"""gRPC server that exposes a Robotiq gripper controls to the client
Communicates with the gripper through modbus
"""
def __init__(self, comport):
self.gripper = Robotiq2FingerGripper(comport=comport)
if not self.gripper.init_success:
raise Exception(f"Unable to open comport to {comport}")
if not self.gripper.getStatus():
raise Exception(f"Failed to contact gripper on port {comport}... ABORTING")
print("Activating gripper...")
self.gripper.activate_emergency_release()
self.gripper.sendCommand()
time.sleep(1)
self.gripper.deactivate_emergency_release()
self.gripper.sendCommand()
time.sleep(1)
self.gripper.activate_gripper()
self.gripper.sendCommand()
if (
self.gripper.is_ready()
and self.gripper.sendCommand()
and self.gripper.getStatus()
):
print("Activated.")
else:
raise Exception(f"Unable to activate!")
def GetState(self, request, context):
self.gripper.getStatus()
state = polymetis_pb2.GripperState()
state.timestamp.GetCurrentTime()
state.width = self.gripper.get_pos()
state.max_width = self.gripper.stroke
state.is_grasped = self.gripper.object_detected()
state.is_moving = self.gripper.is_moving()
return state
def Goto(self, request, context):
self.gripper.goto(pos=request.width, vel=request.speed, force=request.force)
self.gripper.sendCommand()
return polymetis_pb2.Empty()
def Grasp(self, request, context):
self.gripper.goto(pos=request.width, vel=request.speed, force=request.force)
self.gripper.sendCommand()
return polymetis_pb2.Empty()
class GripperServerLauncher:
def __init__(self, ip="localhost", port="50052", comport="/dev/ttyUSB0"):
self.address = f"{ip}:{port}"
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
polymetis_pb2_grpc.add_GripperServerServicer_to_server(
RobotiqGripperServer(comport), self.server
)
self.server.add_insecure_port(self.address)
def run(self):
self.server.start()
print(f"Robotiq-2F gripper server running at {self.address}.")
self.server.wait_for_termination()
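# Usage sketch (added for illustration; the values shown are just the defaults
# and the serial port path is machine-specific): start the gripper gRPC server.
if __name__ == "__main__":
    launcher = GripperServerLauncher(ip="localhost", port="50052", comport="/dev/ttyUSB0")
    launcher.run()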
|
py | b40ab4997fb5595befd0c48eb8f3183d6bfcdb07 | __version__ = '1.0.1'
import string
DEFAULT_ALPHABET = string.digits + string.ascii_lowercase + string.ascii_uppercase
def short(number: int, alphabet: str = DEFAULT_ALPHABET):
""" Converts an integer to a hexdigest like string -
except not 16 length but provided alphabet length """
if not isinstance(number, int):
raise TypeError('number must be an integer')
result = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
result = alphabet[i] + result
return result
def expand(string: str, alphabet=DEFAULT_ALPHABET):
""" Enter hexdigest like string - return integer (base 10) """
result = 0
char_list = list(enumerate(string[::-1]))
for char in char_list:
result += (alphabet.index(char[1]) * len(alphabet) ** char[0])
return result
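# Illustrative round-trip (added for clarity, not part of the original module):
# short() encodes a non-negative integer and expand() inverts it.
if __name__ == "__main__":
    encoded = short(123456789)
    assert expand(encoded) == 123456789
    print(encoded)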
|
py | b40ab4b53696b662b313b5f398f642263f038d8c | """Preprocessor module."""
import copy
import inspect
import logging
from pprint import pformat
from iris.cube import Cube
from .._provenance import TrackedFile
from .._task import BaseTask
from ..cmor.check import cmor_check_data, cmor_check_metadata
from ..cmor.fix import fix_data, fix_file, fix_metadata
from ._ancillary_vars import add_fx_variables, remove_fx_variables
from ._area import (
area_statistics,
extract_named_regions,
extract_region,
extract_shape,
meridional_statistics,
zonal_statistics,
)
from ._bias import bias
from ._cycles import amplitude
from ._derive import derive
from ._detrend import detrend
from ._io import (
_get_debug_filename,
cleanup,
concatenate,
load,
save,
write_metadata,
)
from ._mask import (
mask_above_threshold,
mask_below_threshold,
mask_fillvalues,
mask_glaciated,
mask_inside_range,
mask_landsea,
mask_landseaice,
mask_multimodel,
mask_outside_range,
)
from ._multimodel import multi_model_statistics
from ._other import clip
from ._regrid import extract_levels, extract_location, extract_point, regrid
from ._time import (
annual_statistics,
anomalies,
climate_statistics,
clip_timerange,
daily_statistics,
decadal_statistics,
extract_month,
extract_season,
extract_time,
hourly_statistics,
monthly_statistics,
regrid_time,
resample_hours,
resample_time,
seasonal_statistics,
timeseries_filter,
)
from ._trend import linear_trend, linear_trend_stderr
from ._units import convert_units
from ._volume import (
depth_integration,
extract_trajectory,
extract_transect,
extract_volume,
volume_statistics,
)
from ._weighting import weighting_landsea_fraction
logger = logging.getLogger(__name__)
__all__ = [
# File reformatting/CMORization
'fix_file',
# Load cubes from file
'load',
# Derive variable
'derive',
# Metadata reformatting/CMORization
'fix_metadata',
# Concatenate all cubes in one
'concatenate',
'cmor_check_metadata',
# Extract years given by dataset keys (start_year and end_year)
'clip_timerange',
# Data reformatting/CMORization
'fix_data',
'cmor_check_data',
# Load fx_variables in cube
'add_fx_variables',
# Time extraction (as defined in the preprocessor section)
'extract_time',
'extract_season',
'extract_month',
'resample_hours',
'resample_time',
# Level extraction
'extract_levels',
# Weighting
'weighting_landsea_fraction',
# Mask landsea (fx or Natural Earth)
'mask_landsea',
# Natural Earth only
'mask_glaciated',
# Mask landseaice, sftgif only
'mask_landseaice',
# Regridding
'regrid',
# Point interpolation
'extract_point',
'extract_location',
# Masking missing values
'mask_multimodel',
'mask_fillvalues',
'mask_above_threshold',
'mask_below_threshold',
'mask_inside_range',
'mask_outside_range',
# Other
'clip',
# Region selection
'extract_region',
'extract_shape',
'extract_volume',
'extract_trajectory',
'extract_transect',
# 'average_zone': average_zone,
# 'cross_section': cross_section,
'detrend',
# Grid-point operations
'extract_named_regions',
'depth_integration',
'area_statistics',
'volume_statistics',
# Time operations
# 'annual_cycle': annual_cycle,
# 'diurnal_cycle': diurnal_cycle,
'amplitude',
'zonal_statistics',
'meridional_statistics',
'hourly_statistics',
'daily_statistics',
'monthly_statistics',
'seasonal_statistics',
'annual_statistics',
'decadal_statistics',
'climate_statistics',
'anomalies',
'regrid_time',
'timeseries_filter',
'linear_trend',
'linear_trend_stderr',
# Convert units
'convert_units',
# Multi model statistics
'multi_model_statistics',
# Bias calculation
'bias',
# Remove fx_variables from cube
'remove_fx_variables',
# Save to file
'save',
'cleanup',
]
TIME_PREPROCESSORS = [
'clip_timerange',
'extract_time',
'extract_season',
'extract_month',
'daily_statistics',
'monthly_statistics',
'seasonal_statistics',
'annual_statistics',
'decadal_statistics',
'climate_statistics',
'anomalies',
'regrid_time',
]
DEFAULT_ORDER = tuple(__all__)
"""
By default, preprocessor functions are applied in this order.
"""
# The order of initial and final steps cannot be configured
INITIAL_STEPS = DEFAULT_ORDER[:DEFAULT_ORDER.index('add_fx_variables') + 1]
FINAL_STEPS = DEFAULT_ORDER[DEFAULT_ORDER.index('remove_fx_variables'):]
MULTI_MODEL_FUNCTIONS = {
'bias',
'multi_model_statistics',
'mask_multimodel',
'mask_fillvalues',
}
def _get_itype(step):
"""Get the input type of a preprocessor function."""
function = globals()[step]
itype = inspect.getfullargspec(function).args[0]
return itype
def check_preprocessor_settings(settings):
"""Check preprocessor settings."""
for step in settings:
if step not in DEFAULT_ORDER:
raise ValueError(
"Unknown preprocessor function '{}', choose from: {}".format(
step, ', '.join(DEFAULT_ORDER)))
        function = globals()[step]
argspec = inspect.getfullargspec(function)
args = argspec.args[1:]
if not (argspec.varargs or argspec.varkw):
# Check for invalid arguments
invalid_args = set(settings[step]) - set(args)
if invalid_args:
raise ValueError(
"Invalid argument(s): {} encountered for preprocessor "
"function {}. \nValid arguments are: [{}]".format(
', '.join(invalid_args), step, ', '.join(args)))
# Check for missing arguments
defaults = argspec.defaults
end = None if defaults is None else -len(defaults)
missing_args = set(args[:end]) - set(settings[step])
if missing_args:
raise ValueError(
"Missing required argument(s) {} for preprocessor "
"function {}".format(missing_args, step))
# Final sanity check in case the above fails to catch a mistake
try:
signature = inspect.Signature.from_callable(function)
signature.bind(None, **settings[step])
except TypeError:
logger.error(
"Wrong preprocessor function arguments in "
"function '%s'", step)
raise
def _check_multi_model_settings(products):
"""Check that multi dataset settings are identical for all products."""
multi_model_steps = (step for step in MULTI_MODEL_FUNCTIONS
if any(step in p.settings for p in products))
for step in multi_model_steps:
reference = None
for product in products:
settings = product.settings.get(step)
if settings is None:
continue
if reference is None:
reference = product
elif reference.settings[step] != settings:
raise ValueError(
"Unable to combine differing multi-dataset settings for "
"{} and {}, {} and {}".format(reference.filename,
product.filename,
reference.settings[step],
settings))
def _get_multi_model_settings(products, step):
"""Select settings for multi model step."""
_check_multi_model_settings(products)
settings = {}
exclude = set()
for product in products:
if step in product.settings:
settings = product.settings[step]
else:
exclude.add(product)
return settings, exclude
def _run_preproc_function(function, items, kwargs, input_files=None):
"""Run preprocessor function."""
kwargs_str = ",\n".join(
[f"{k} = {pformat(v)}" for (k, v) in kwargs.items()])
if input_files is None:
file_msg = ""
else:
file_msg = (f"\nloaded from original input file(s)\n"
f"{pformat(input_files)}")
logger.debug(
"Running preprocessor function '%s' on the data\n%s%s\nwith function "
"argument(s)\n%s", function.__name__, pformat(items), file_msg,
kwargs_str)
try:
return function(items, **kwargs)
except Exception:
# To avoid very long error messages, we truncate the arguments and
# input files here at a given threshold
n_shown_args = 4
if input_files is not None and len(input_files) > n_shown_args:
n_not_shown_files = len(input_files) - n_shown_args
file_msg = (f"\nloaded from original input file(s)\n"
f"{pformat(input_files[:n_shown_args])}\n(and "
f"{n_not_shown_files:d} further file(s) not shown "
f"here; refer to the debug log for a full list)")
# Make sure that the arguments are indexable
if isinstance(items, (PreprocessorFile, Cube, str)):
items = [items]
if isinstance(items, set):
items = list(items)
if len(items) <= n_shown_args:
data_msg = pformat(items)
else:
n_not_shown_args = len(items) - n_shown_args
data_msg = (f"{pformat(items[:n_shown_args])}\n(and "
f"{n_not_shown_args:d} further argument(s) not shown "
f"here; refer to the debug log for a full list)")
logger.error(
"Failed to run preprocessor function '%s' on the data\n%s%s\nwith "
"function argument(s)\n%s", function.__name__, data_msg, file_msg,
kwargs_str)
raise
def preprocess(items, step, input_files=None, **settings):
"""Run preprocessor."""
logger.debug("Running preprocessor step %s", step)
function = globals()[step]
itype = _get_itype(step)
result = []
if itype.endswith('s'):
result.append(_run_preproc_function(function, items, settings,
input_files=input_files))
else:
for item in items:
result.append(_run_preproc_function(function, item, settings,
input_files=input_files))
items = []
for item in result:
if isinstance(item, (PreprocessorFile, Cube, str)):
items.append(item)
else:
items.extend(item)
return items
def get_step_blocks(steps, order):
"""Group steps into execution blocks."""
blocks = []
prev_step_type = None
for step in order[order.index('load') + 1:order.index('save')]:
if step in steps:
step_type = step in MULTI_MODEL_FUNCTIONS
if step_type is not prev_step_type:
block = []
blocks.append(block)
prev_step_type = step_type
block.append(step)
return blocks
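# Illustrative sketch (hypothetical step set): with steps={'extract_time',
# 'regrid', 'multi_model_statistics'} and order=DEFAULT_ORDER, consecutive
# single-model steps share a block while multi-model steps start a new one,
# giving [['extract_time', 'regrid'], ['multi_model_statistics']].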
class PreprocessorFile(TrackedFile):
"""Preprocessor output file."""
def __init__(self, attributes, settings, ancestors=None):
super(PreprocessorFile, self).__init__(attributes['filename'],
attributes, ancestors)
self.settings = copy.deepcopy(settings)
if 'save' not in self.settings:
self.settings['save'] = {}
self.settings['save']['filename'] = self.filename
# self._input_files always contains the original input files;
# self.files may change in the preprocessing chain (e.g., by the step
# fix_file)
self._input_files = [a.filename for a in ancestors or ()]
self.files = copy.deepcopy(self._input_files)
self._cubes = None
self._prepared = False
def _input_files_for_log(self):
"""Do not log input files twice in output log."""
if self.files == self._input_files:
return None
return self._input_files
def check(self):
"""Check preprocessor settings."""
check_preprocessor_settings(self.settings)
def apply(self, step, debug=False):
"""Apply preprocessor step to product."""
if step not in self.settings:
raise ValueError(
"PreprocessorFile {} has no settings for step {}".format(
self, step))
self.cubes = preprocess(self.cubes, step,
input_files=self._input_files,
**self.settings[step])
if debug:
logger.debug("Result %s", self.cubes)
filename = _get_debug_filename(self.filename, step)
save(self.cubes, filename)
def prepare(self):
"""Apply preliminary file operations on product."""
if not self._prepared:
for step in DEFAULT_ORDER[:DEFAULT_ORDER.index('load')]:
if step in self.settings:
self.files = preprocess(
self.files, step,
input_files=self._input_files_for_log(),
**self.settings[step])
self._prepared = True
@property
def cubes(self):
"""Cubes."""
if self.is_closed:
self.prepare()
self._cubes = preprocess(self.files, 'load',
input_files=self._input_files_for_log(),
**self.settings.get('load', {}))
return self._cubes
@cubes.setter
def cubes(self, value):
self._cubes = value
def save(self):
"""Save cubes to disk."""
self.files = preprocess(self._cubes, 'save',
input_files=self._input_files,
**self.settings['save'])
self.files = preprocess(self.files, 'cleanup',
input_files=self._input_files,
**self.settings.get('cleanup', {}))
def close(self):
"""Close the file."""
if self._cubes is not None:
self.save()
self._cubes = None
self.save_provenance()
@property
def is_closed(self):
"""Check if the file is closed."""
return self._cubes is None
def _initialize_entity(self):
"""Initialize the entity representing the file."""
super(PreprocessorFile, self)._initialize_entity()
settings = {
'preprocessor:' + k: str(v)
for k, v in self.settings.items()
}
self.entity.add_attributes(settings)
# TODO: use a custom ProductSet that raises an exception if you try to
# add the same Product twice
def _apply_multimodel(products, step, debug):
"""Apply multi model step to products."""
settings, exclude = _get_multi_model_settings(products, step)
logger.debug("Applying %s to\n%s", step,
'\n'.join(str(p) for p in products - exclude))
result = preprocess(products - exclude, step, **settings)
products = set(result) | exclude
if debug:
for product in products:
logger.debug("Result %s", product.filename)
if not product.is_closed:
for cube in product.cubes:
logger.debug("with cube %s", cube)
return products
class PreprocessingTask(BaseTask):
"""Task for running the preprocessor."""
def __init__(
self,
products,
ancestors=None,
name='',
order=DEFAULT_ORDER,
debug=None,
write_ncl_interface=False,
):
"""Initialize."""
_check_multi_model_settings(products)
super().__init__(ancestors=ancestors, name=name, products=products)
self.order = list(order)
self.debug = debug
self.write_ncl_interface = write_ncl_interface
def _initialize_product_provenance(self):
"""Initialize product provenance."""
for product in self.products:
product.initialize_provenance(self.activity)
# Hacky way to initialize the multi model products as well.
step = 'multi_model_statistics'
input_products = [p for p in self.products if step in p.settings]
if input_products:
statistic_products = input_products[0].settings[step].get(
'output_products', {}).values()
for product in statistic_products:
product.initialize_provenance(self.activity)
def _run(self, _):
"""Run the preprocessor."""
self._initialize_product_provenance()
steps = {
step
for product in self.products for step in product.settings
}
blocks = get_step_blocks(steps, self.order)
for block in blocks:
logger.debug("Running block %s", block)
if block[0] in MULTI_MODEL_FUNCTIONS:
for step in block:
self.products = _apply_multimodel(self.products, step,
self.debug)
else:
for product in self.products:
logger.debug("Applying single-model steps to %s", product)
for step in block:
if step in product.settings:
product.apply(step, self.debug)
if block == blocks[-1]:
product.close()
for product in self.products:
product.close()
metadata_files = write_metadata(self.products,
self.write_ncl_interface)
return metadata_files
def __str__(self):
"""Get human readable description."""
order = [
step for step in self.order
if any(step in product.settings for product in self.products)
]
products = '\n\n'.join('\n'.join([str(p), pformat(p.settings)])
for p in self.products)
txt = "{}: {}\norder: {}\n{}\n{}".format(
self.__class__.__name__,
self.name,
order,
products,
self.print_ancestors(),
)
return txt
|
py | b40ab59eea068e82b6ac6d51a00452a5407ee305 | def nonConstructibleChange(coins):
coins.sort()
# Write your code here.
changeable=0
for coin in coins:
if coin>changeable+1:
return changeable+1
changeable+=coin
return changeable+1
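# Illustrative usage (greedy invariant: after scanning a sorted prefix every
# amount up to `changeable` can be made, so the first coin larger than
# changeable + 1 exposes the smallest non-constructible amount):
# nonConstructibleChange([5, 7, 1, 1, 2, 3, 22])  # -> 20
# nonConstructibleChange([1, 1, 1, 1, 1])         # -> 6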
|
py | b40ab5e29cb9b49b9c4c1ff24f2f8221a6115e22 | _formats = {
'ymd' : '{d.year}-{d.month}-{d.day}',
'mdy' : '{d.month}/{d.day}/{d.year}',
'dmy' : '{d.day}/{d.month}/{d.year}'
}
class Date:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
def __format__(self, code):
if code == '':
code = 'ymd'
fmt = _formats[code]
return fmt.format(d=self)
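# Illustrative usage of the custom __format__ hook:
# d = Date(2012, 12, 21)
# format(d)          # -> '2012-12-21' (empty code defaults to 'ymd')
# format(d, 'mdy')   # -> '12/21/2012'
# f'{d:dmy}'         # -> '21/12/2012'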
|
py | b40ab62a8a352661d74a0d7be1441361d495c2c0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-21 14:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SampleArticle',
fields=[
],
options={
'proxy': True,
},
bases=('blog.article',),
),
migrations.RemoveField(
model_name='article',
name='is_published',
),
migrations.AddField(
model_name='article',
name='hits',
field=models.IntegerField(default=0, editable=False),
),
migrations.AddField(
model_name='article',
name='is_sample',
field=models.BooleanField(default=False, editable=False),
),
migrations.AddField(
model_name='article',
name='published_at',
field=models.DateTimeField(editable=False, null=True),
),
migrations.AddField(
model_name='article',
name='status',
field=models.CharField(choices=[(b'draft', b'Draft'), (b'for_pub', b'Ready for publishing'), (b'published', b'Published')], default=b'draft', max_length=10),
),
migrations.AddField(
model_name='article',
name='words_count',
field=models.IntegerField(default=0, editable=False),
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-created_at'], 'permissions': (('view_article_hits', 'Can view article hits'), ('change_article_slug', 'Can change article slug'))},
),
]
|
py | b40ab6da897f56aad3d0ac1e8bae4bbaacac4233 | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2018
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
RuleSemOMethods.py - Classes for all Rule based Generators
===========================================================
Copyright CUED Dialogue Systems Group 2015 - 2017
.. seealso:: CUED Imports/Dependencies:
import :mod:`semo.SemOManager` |.|
import :mod:`utils.Settings` |.|
import :mod:`utils.DiaAct` |.|
import :mod:`utils.dact` |.|
import :mod:`utils.ContextLogger` |.|
import :mod:`ontology.OntologyUtils` |.|
************************
'''
__author__ = "cued_dialogue_systems_group"
'''
Modifications History
===============================
Date Author Description
===============================
Sep 07 2016 lmr46 Shuffle list of items in DiaAct.
Jul 20 2016 lmr46 Supporting only the domains configured in the config-file
                            Domain in _set_NL_DOMAINS (Note that these domains are hardcoded here.)
'''
import tokenize
from collections import defaultdict
import re
import os.path
import copy
from random import shuffle
import SemOManager
from utils import Settings, DiaAct, ContextLogger, dact
from ontology import OntologyUtils
logger = ContextLogger.getLogger('')
def parse_output(input_string):
'''Utility function used within this file's classes.
    :param input_string: the template output string to split into words
:type input_string: str
'''
from utils import Scanner
output_scanner = Scanner.Scanner(input_string)
output_scanner.next()
words = []
prevWasVariable = False
while output_scanner.cur[0] != tokenize.ENDMARKER:
if output_scanner.cur[0] == tokenize.NAME:
words.append(output_scanner.cur[1])
output_scanner.next()
prevWasVariable=False
elif output_scanner.cur[1] == '$':
variable = '$'
output_scanner.next()
variable += output_scanner.cur[1]
words.append(variable)
output_scanner.next()
prevWasVariable=True
elif output_scanner.cur[1] == '%':
function = '%'
output_scanner.next()
while output_scanner.cur[1] != ')':
function += output_scanner.cur[1]
output_scanner.next()
function += output_scanner.cur[1]
words.append(function)
output_scanner.next()
prevWasVariable=True
else:
if prevWasVariable:
words.append(output_scanner.cur[1])
output_scanner.next()
else:
words[-1] += output_scanner.cur[1]
output_scanner.next()
prevWasVariable=False
return words
#------------------------------------------------------------------------
# RULE BASED SEMO CLASSES
#------------------------------------------------------------------------
class PassthroughSemO(SemOManager.SemO):
'''**Does nothing** - simply pass system act directly through.
'''
def __init__(self):
pass
def generate(self, act):
'''
:param act: the dialogue act to be verbalized
:type act: str
:returns: **EXACT** act as was input
'''
return act
class BasicTemplateRule(object):
'''
The template rule corresponds to a single line in a template rules file.
This consists of an act (including non-terminals) that the rule applies to with an output string to generate
(again including non-terminals).
Example::
select(food=$X, food=dontcare) : "Sorry would you like $X food or you dont care";
        self.rule_items = {food: [$X, dontcare]}
'''
def __init__(self, scanner):
'''
Reads a template rule from the scanner. This should have the form 'act: string' with optional comments.
'''
self.rule_act = self.read_from_stream(scanner)
rule_act_str = str(self.rule_act)
if '))' in rule_act_str:
logger.warning('Two )): ' + rule_act_str)
if self.rule_act.act == 'badact':
            logger.error('Generated bad act rule: ' + rule_act_str)
scanner.check_token(':', 'Expected \':\' after ' + rule_act_str)
scanner.next()
if scanner.cur[0] not in [tokenize.NAME, tokenize.STRING]:
raise SyntaxError('Expected string after colon')
# Parse output string.
self.output = scanner.cur[1].strip('"\'').strip()
self.output_list = parse_output(self.output)
scanner.next()
scanner.check_token(';', 'Expected \';\' at the end of string')
scanner.next()
# rule_items = {slot: [val1, val2, ...], ...}
self.rule_items = defaultdict(list)
for item in self.rule_act.items:
self.rule_items[item.slot].append(item.val)
def __str__(self):
s = str(self.rule_act)
s += ' : '
s += self.output + ';'
return s
def read_from_stream(self, scanner):
sin = ''
while scanner.cur[1] != ';' and scanner.cur[0] != tokenize.ENDMARKER and scanner.cur[1] != ':':
sin += scanner.cur[1]
scanner.next()
return DiaAct.DiaAct(sin)
def generate(self, input_act):
'''
Generates a text from using this rule on the given input act.
Also edits the passed variables to store the number of matched items,
number of missing items and number of matched utterance types.
Note that the order of the act and rule acts must be exactly the same.
:returns: output, match_count, missing_count, type_match_count
'''
type_match_count = 0
match_count = 0
missing_count = 0
non_term_map = {}
if self.rule_act.act == input_act.act:
type_match_count += 1
match_count, missing_count, non_term_map = self.match_act(input_act)
return self.generate_from_map(non_term_map), match_count, missing_count, type_match_count, non_term_map
def generate_from_map(self, non_term_map):
'''
Does the generation by substituting values in non_term_map.
:param non_term_map: {$X: food, ...}
:return: list of generated words
'''
num_missing = 0
word_list = copy.deepcopy(self.output_list)
for i, word in enumerate(word_list):
if word[0] == '$': # Variable $X
if word not in non_term_map:
# logger.debug('%s not found in non_term_map %s.' % (word, str(non_term_map)))
num_missing += 1
else:
word_list[i] = non_term_map[word]
# %$ function in output will be transformed later.
return word_list
def match_act(self, act):
'''
This function matches the given act against the slots in rule_map
any slot-value pairs that are matched will be placed in the non-terminal map.
:param act: The act to match against (i.e. the act that is being transformed, with no non-terminals)
:returns (found_count, num_missing): found_count = # of items matched, num_missing = # of missing values.
'''
        non_term_map = {} # Any matched non-terminals are placed here.
rules = {}
dollar_rules = {}
for slot in self.rule_items:
if slot[0] == '$':
# Copy over rules that have an unspecified slot.
value_list = copy.deepcopy(self.rule_items[slot])
if len(value_list) > 1:
logger.error('Non-terminal %s is mapped to multiple values %s' % (slot, str(value_list)))
dollar_rules[slot] = value_list[0]
else:
# Copy over rules that have a specified slot.
rules[slot] = copy.deepcopy(self.rule_items[slot])
logger.debug(' rules: ' + str(rules))
logger.debug('$rules: ' + str(dollar_rules))
found_count = 0
# For each item in the given system action.
rnditems=act.items
shuffle(rnditems)
for item in rnditems:
found = False
if item.slot in rules:
if item.val in rules[item.slot]:
# Found this exact terminal in the rules. (e.g. food=none)
found = True
found_count += 1
rules[item.slot].remove(item.val)
else:
# Found the rule containing the same slot but no terminal.
# Use the first rule which has a non-terminal.
val = None
for value in rules[item.slot]:
if value[0] == '$':
# Check if we've already assigned this non-terminal.
if value not in non_term_map:
found = True
val = value
break
elif non_term_map[value] == item.val:
# This is a non-terminal so we can re-write it if we've already got it.
# Then this value is the same so that also counts as found.
found = True
val = value
break
if found:
non_term_map[val] = item.val
rules[item.slot].remove(val)
found_count += 1
if not found and len(dollar_rules) > 0:
# The slot doesn't match. Just use the first dollar rule.
for slot in dollar_rules:
if item.val == dollar_rules[slot]: # $X=dontcare
found = True
non_term_map[slot] = item.slot
del dollar_rules[slot]
found_count += 1
break
if not found:
for slot in dollar_rules:
if dollar_rules[slot] is not None and dollar_rules[slot][0] == '$': # $X=$Y
found = True
non_term_map[slot] = item.slot
non_term_map[dollar_rules[slot]] = item.val
del dollar_rules[slot]
found_count += 1
break
num_missing = len([val for sublist in rules.values() for val in sublist])
return found_count, num_missing, non_term_map
class BasicTemplateFunction(object):
'''
A function in the generation rules that converts a group of inputs into an output string.
The use of template functions allows for simplification of the generation file as the way
a given group of variables is generated can be extended over multiple rules.
The format of the function is::
%functionName($param1, $param2, ...) {
p1, p2, ... : "Generation output";}
:param scanner: of :class:`Scanner`
:type scanner: instance
'''
def __init__(self, scanner):
scanner.check_token('%', 'Expected map variable name (with %)')
scanner.next()
self.function_name = '%'+scanner.cur[1]
scanner.next()
scanner.check_token('(', 'Expected open bracket ( after declaration of function')
self.parameter_names = []
while True:
scanner.next()
# print scanner.cur
if scanner.cur[1] == '$':
scanner.next()
self.parameter_names.append(scanner.cur[1])
elif scanner.cur[1] == ')':
break
elif scanner.cur[1] != ',':
                raise SyntaxError('Expected variable, comma, close bracket ) in input definition of template function rule')
if len(self.parameter_names) == 0:
raise SyntaxError('Must have some inputs in function definition: ' + self.function_name)
scanner.next()
scanner.check_token('{', 'Expected open brace after declaration of function ' + self.function_name)
scanner.next()
self.rules = []
while scanner.cur[1] != '}':
new_rule = BasicTemplateFunctionRule(scanner)
if len(new_rule.inputs) != len(self.parameter_names):
raise SyntaxError('Different numbers of parameters (%d) in rules and definition (%d) for function: %s' %
(len(new_rule.inputs), len(self.parameter_names), self.function_name))
self.rules.append(new_rule)
scanner.next()
def transform(self, inputs):
'''
:param inputs: Array of function arguments.
:returns: None
'''
inputs = [w.replace('not available', 'none') for w in inputs]
for rule in self.rules:
if rule.is_applicable(inputs):
return rule.transform(inputs)
logger.error('In function %s: No rule to transform inputs %s' % (self.function_name, str(inputs)))
class BasicTemplateFunctionRule(object):
'''
A single line of a basic template function. This does a conversion of a group of values into a string.
e.g. p1, p2, ... : "Generation output"
:param scanner: of :class:`Scanner`
:type scanner: instance
'''
def __init__(self, scanner):
'''
Loads a template function rule from the stream. The rule should have the format:
input1, input2 : "output string";
'''
self.inputs = []
self.input_map = {}
while True:
# print scanner.cur
if scanner.cur[1] == '$' or scanner.cur[0] in [tokenize.NUMBER, tokenize.STRING, tokenize.NAME]:
input = scanner.cur[1]
if scanner.cur[1] == '$':
scanner.next()
input += scanner.cur[1]
# Add to lookup table.
self.input_map[input] = len(self.inputs)
self.inputs.append(input.strip('"\''))
scanner.next()
elif scanner.cur[1] == ':':
scanner.next()
break
elif scanner.cur[1] == ',':
scanner.next()
else:
raise SyntaxError('Expected string, comma, or colon in input definition of template function rule.')
if len(self.inputs) == 0:
raise SyntaxError('No inputs specified for template function rule.')
# Parse output string.
scanner.check_token(tokenize.STRING, 'Expected string output for template function rule.')
self.output = scanner.cur[1].strip('\"').strip()
self.output = parse_output(self.output)
scanner.next()
scanner.check_token(';', 'Expected semicolon to end template function rule.')
scanner.next()
def __str__(self):
return str(self.inputs) + ' : ' + str(self.output)
def is_applicable(self, inputs):
'''
Checks if this function rule is applicable for the given inputs.
:param inputs: array of words
:returns: (bool)
'''
if len(inputs) != len(self.inputs):
return False
for i, word in enumerate(self.inputs):
if word[0] != '$' and inputs[i] != word:
return False
return True
def transform(self, inputs):
'''
Transforms the given inputs into the output. All variables in the output list are looked up in the map
and the relevant value from the inputs is chosen.
:param inputs: array of words.
:returns: Transformed string.
'''
result = []
for output_word in self.output:
if output_word[0] == '$':
if output_word not in self.input_map:
logger.error('Could not find variable %s' % output_word)
result.append(inputs[self.input_map[output_word]])
else:
result.append(output_word)
return ' '.join(result)
class BasicTemplateGenerator(object):
'''
The basic template generator loads a list of template-based rules from a string.
These are then applied on any input dialogue act and used to generate an output string.
:param filename: the template rules file
:type filename: str
'''
def __init__(self, filename):
from utils import Scanner
fn = Settings.locate_file(filename)
if os.path.exists(fn):
f = open(fn)
string = f.read()
            string = string.replace('\t', ' ')
file_without_comment = Scanner.remove_comments(string)
scanner = Scanner.Scanner(file_without_comment)
scanner.next()
self.rules = []
self.functions = []
self.function_map = {}
self.parse_rules(scanner)
f.close()
else:
logger.error("Cannot locate template file %s",filename)
def parse_rules(self, scanner):
'''Check the given rules
:param scanner: of :class:`Scanner`
:type scanner: instance
'''
try:
while scanner.cur[0] not in [tokenize.ENDMARKER]:
if scanner.cur[0] == tokenize.NAME:
self.rules.append(BasicTemplateRule(scanner))
elif scanner.cur[1] == '%':
ftn = BasicTemplateFunction(scanner)
self.functions.append(ftn)
self.function_map[ftn.function_name] = ftn
else:
raise SyntaxError('Expected a string or function map but got ' +
scanner.cur[1] + ' at this position while parsing generation rules.')
except SyntaxError as inst:
            print(inst)
def transform(self, sysAct):
'''
Transforms the sysAct from a semantic utterance form to a text form using the rules in the generator.
This function will run the sysAct through all variable rules and will choose the best one according to the
number of matched act types, matched items and missing items.
:param sysAct: input system action (semantic form).
:type sysAct: str
:returns: (str) natural language
'''
input_utt = DiaAct.DiaAct(sysAct)
# FIXME hack to transform system acts with slot op "!=" to "=" and add slot-value pair other=true which is needed by NLG rule base
# assumption: "!=" only appears if there are no further alternatives, ie, inform(name=none, name!=place!, ...)
negFound = False
for item in input_utt.items:
if item.op == "!=":
item.op = u"="
negFound = True
if negFound:
otherTrue = dact.DactItem(u'other',u'=',u'true')
input_utt.items.append(otherTrue)
# Iterate over BasicTemplateRule rules.
best_rule = None
best = None
best_matches = 0
best_type_match = 0
best_missing = 1000
best_non_term_map = None
for rule in self.rules:
logger.debug('Checking Rule %s' % str(rule))
out, matches, missing, type_match, non_term_map = rule.generate(input_utt)
if type_match > 0:
logger.debug('Checking Rule %s: type_match=%d, missing=%d, matches=%d, output=%s' %
(str(rule), type_match, missing, matches, ' '.join(out)))
# Pick up the best rule.
choose_this = False
if type_match > 0:
if missing < best_missing:
choose_this = True
elif missing == best_missing:
if type_match > best_type_match:
choose_this = True
elif type_match == best_type_match and matches > best_matches:
choose_this = True
if choose_this:
best_rule = rule
best = out
best_missing = missing
best_type_match = type_match
best_matches = matches
best_non_term_map = non_term_map
if best_type_match == 1 and best_missing == 0 and best_matches == len(input_utt.items):
break
if best_rule is not None:
if best_missing > 0:
logger.warning('While transforming %s, there were missing items.' % sysAct)
else:
logger.debug('No rule used.')
best = self.compute_ftn(best, best_non_term_map)
return ' '.join(best)
def compute_ftn(self, input_words, non_term_map):
'''
Applies this function to convert a function into a string.
:param input_words: of generated words. Some words might contain function. `(e.g. %count_rest($X) or %$Y_str($P) ...)`
:type input_words: list
:param non_term_map:
:returns: (list) modified input_words
'''
for i, word in enumerate(input_words):
if '%' not in word:
continue
logger.debug('Processing %s in %s...' % (word, str(input_words)))
m = re.search('^([^\(\)]*)\((.*)\)(.*)$', word.strip())
if m is None:
logger.error('Parsing failed in %s' % word.strip())
ftn_name = m.group(1)
ftn_args = [x.strip() for x in m.group(2).split(',')]
remaining = ''
if len(m.groups()) > 2:
remaining = m.group(3)
# Processing function name.
if '$' in ftn_name:
tokens = ftn_name.split('_')
if len(tokens) > 2:
logger.error('More than one underbar _ found in function name %s' % ftn_name)
var = tokens[0][1:]
if var not in non_term_map:
logger.error('Unable to find nonterminal %s in non terminal map.' % var)
ftn_name = ftn_name.replace(var, non_term_map[var])
# Processing function args.
for j, arg in enumerate(ftn_args):
if arg[0] == '%':
                    logger.error('%% in function argument %s' % str(word))
elif arg[0] == '$':
ftn_args[j] = non_term_map[arg]
if ftn_name not in self.function_map:
logger.error('Function name %s is not found.' % ftn_name)
else:
input_words[i] = self.function_map[ftn_name].transform(ftn_args) + remaining
return input_words
class BasicSemO(SemOManager.SemO):
'''
Template-based output generator. Note that the class inheriting from object is important - without this the super method
can not be called -- This relates to 'old-style' and 'new-style' classes in python if interested ...
:parameter [basicsemo] templatefile: The template file to use for generation.
:parameter [basicsemo] emphasis: Generate emphasis tags.
    :parameter [basicsemo] emphasisopen: Emphasis open tag (default: <EMPH>).
    :parameter [basicsemo] emphasisclose: Emphasis close tag (default: </EMPH>).
'''
def __init__(self, domainTag=None):
template_filename = None
if Settings.config.has_option('semo_'+domainTag, 'templatefile'):
template_filename = str(Settings.config.get('semo_'+domainTag, 'templatefile'))
self.emphasis = False
if Settings.config.has_option('semo_'+domainTag, 'emphasis'):
self.emphasis = Settings.config.getboolean('semo_'+domainTag, 'emphasis')
self.emphasis_open = '<EMPH>'
if Settings.config.has_option('semo_'+domainTag, 'emphasisopen'):
            self.emphasis_open = Settings.config.get('semo_'+domainTag, 'emphasisopen')
self.emphasis_close = '</EMPH>'
if Settings.config.has_option('semo_'+domainTag, 'emphasisclose'):
            self.emphasis_close = Settings.config.get('semo_'+domainTag, 'emphasisclose')
self.generator = BasicTemplateGenerator(template_filename)
def generate(self, act):
if self.emphasis:
logger.warning('Emphasis is not implemented.')
return self.generator.transform(act)
class TopicManagerBasicSemO(BasicSemO):
'''
The generator class for topic manager domain. This is used for handling topic manager specfic conversations.
'''
def __init__(self, domainTag=None):
super(TopicManagerBasicSemO, self).__init__(domainTag)
self._set_NL_DOMAINS() # templates are slightly dynamic. This init's some messages.
# Methods just for TopicManager:
def _set_NL_DOMAINS(self):
"""Natural language for domain names
"""
domains = Settings.config.get("GENERAL",'domains') # a Hub has checked this exists
possible_domains = domains.split(',')
#lmr46: Adapting only the domains available in the config file
NL_DOMAINS = dict.fromkeys(OntologyUtils.available_domains)
for dom in possible_domains:
text=""
if dom=="CamRestaurants":
text= "Cambridge Restaurant"
elif dom=="CamHotels":
text='Cambridge Hotel'
elif dom=="Laptops6":
text= "Laptops"
elif dom=="camtourist":
text= "Cambridge Restaurants or hotels"
elif dom=="SFRestaurants":
text="San Francisco Restaurant"
elif dom=="SFHotels":
text="San Francisco Hotel"
NL_DOMAINS[dom] = text
#NL_DOMAINS["CamHotels"] = 'Cambridge Hotel'
#NL_DOMAINS["Laptops6"] = "Laptops"
#NL_DOMAINS["camtourist"] = "Restaurant or a hotel"
#TODO -- OTHER DOMAIN LABELS -- topic tracker only works for CamRestaurants and CamHotels at present - so only these here now.
self.possible_domains_NL = []
for dstring in possible_domains:
self.possible_domains_NL.append(NL_DOMAINS[dstring])
if len(possible_domains) > 1:
self.possible_domains_NL[-1] = 'or a ' + self.possible_domains_NL[-1]
return
def _filter_prompt(self,promptIn):
"""
"""
#Just used by the Generic Dialogue Manager at present
DTAG = "_DOMAINS_"
if DTAG in promptIn:
domains = ', '.join(self.possible_domains_NL)
return promptIn.replace(DTAG,domains)
return promptIn
def generate(self, act):
'''Overrides the BasicSemO generate() method to do some additional things just for topic manager domain.
'''
nlg = super(TopicManagerBasicSemO,self).generate(act)
return self._filter_prompt(nlg)
if __name__ == '__main__':
BasicTemplateGenerator('semo/templates/CamRestaurantsMessages.txt')
#END OF FILE
|
py | b40ab7f0e959459def5060e4ebd4b7743ec26e0b | # coding: utf-8
"""
LOCKSS Metadata Service REST API
API of the LOCKSS Metadata REST Service # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import lockss_metadata
from lockss_metadata.api.metadata_api import MetadataApi # noqa: E501
from lockss_metadata.rest import ApiException
class TestMetadataApi(unittest.TestCase):
"""MetadataApi unit test stubs"""
def setUp(self):
self.api = lockss_metadata.api.metadata_api.MetadataApi() # noqa: E501
def tearDown(self):
pass
def test_delete_metadata_aus_auid(self):
"""Test case for delete_metadata_aus_auid
Delete the metadata stored for an AU # noqa: E501
"""
pass
def test_get_metadata_aus_auid(self):
"""Test case for get_metadata_aus_auid
Get the metadata stored for an AU # noqa: E501
"""
pass
def test_post_metadata_aus_item(self):
"""Test case for post_metadata_aus_item
Store the metadata for an AU item # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
py | b40ab87ab835bf125f310049c0c256915658473f | import hashlib
key = b'ckczppom'
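# Brute-force search (a sketch of the intent, assuming this follows the
# Advent of Code 2015 Day 4 style puzzle): find the smallest positive integer
# i such that md5(key + str(i)) starts with six zero hex digits; five-zero
# prefixes are printed along the way as progress markers.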
for i in range(1, 2**64):
m = hashlib.md5()
m.update(key + str(i).encode())
s = m.hexdigest()[0:6]
if s[0:5] == '00000':
print('#', i)
if s == '000000':
print(i)
break
|
py | b40ab9e383fa55e57436a83dd45cc1353d458ec1 | #coding:utf-8
import itchat
from itchat.content import *
import re
from threading import Timer
# 目前是按用户发送的,用的 itchat.search_friends, 如果要改为群聊,需要换成 itchat.search_chatrooms
xyz_compile = re.compile(r'.*?绿健简报.*', re.S)
@itchat.msg_register(itchat.content.TEXT)
def xyz_reply(msg):
group_list = [u'filehelper']
group_name = []
for group in group_list:
chat = itchat.search_friends(name=group)
if len(chat) > 0:
group_name.append(chat[0]['UserName'])
        # Compatibility with filehelper
elif group == u'filehelper':
group_name.append(group)
    # Filter the Xiaoyuzhou news
    # Needs to be replaced with Xiaoyuzhou's UserName
MY_NAME = itchat.search_friends(userName=msg['FromUserName'])['NickName']
print(MY_NAME)
if msg['FromUserName'] is not None and MY_NAME == u'陈望基':
result = xyz_compile.search(msg['Content'])
if result is not None:
if result.group() is not None:
for group in group_name:
itchat.send(msg['Content'], toUserName=group)
def loop_send():
global count
itchat.send('大扎好,我系轱天乐,我四渣嘎辉,探挽懒月,介四里没有挽过的船新版本,'
'挤需体验三番钟,里造会干我一样,爱像借款游戏。'
, toUserName='filehelper')
count += 1
if count < 10000:
Timer(600, loop_send).start()
if __name__ == '__main__':
count = 0
Timer(600, loop_send).start()
itchat.auto_login(enableCmdQR=2, hotReload=True)
itchat.run() |
py | b40abb1460052654dff2dc08931576b95cf331fd | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 09:08:02 2020
@author: rlk268
"""
"""
@author: [email protected]
"""
import havsim.simulation.calibration as hc
import time
import scipy.optimize as sc
import matplotlib.pyplot as plt
import math
import pickle
from havsim.calibration.algs import makeplatoonlist
import havsim.simulation.models as hm
import havsim.simulation.simulation as hs
# load data
try:
with open('C:/Users/rlk268/OneDrive - Cornell University/important misc/datasets/trajectory data/mydata.pkl', 'rb') as f:
rawdata, truedata, data, trueextradata = pickle.load(f) #load data
except:
with open('/home/rlk268/data/mydata.pkl', 'rb') as f:
rawdata, truedata, data, trueextradata = pickle.load(f) #load data
meas, platooninfo = makeplatoonlist(data,1,False)
# categorize vehicles
veh_list = meas.keys()
merge_list = []
lc_list = []
nolc_list = []
for veh in veh_list:
t_nstar, t_n = platooninfo[veh][0:2]
if t_n > t_nstar and meas[veh][t_n-t_nstar-1,7]==7 and meas[veh][t_n-t_nstar,7]==6:
merge_list.append(veh)
elif len(platooninfo[veh][4]) > 1:
lc_list.append(veh)
elif len(platooninfo[veh][4]) == 1:
nolc_list.append(veh)
# define training loop
def training(veh_id, plist, bounds, meas, platooninfo, dt, vehicle_object, cutoff = 6):
"""Runs bfgs with multiple initial guesses to fit parameters for a CalibrationVehicle"""
#veh_id = float vehicle id, plist = list of parameters, bounds = bounds for optimizer (list of tuples),
#vehicle_object = (possibly subclassed) CalibrationVehicle object, cutoff = minimum mse required for
#multiple guesses
cal = hc.make_calibration([veh_id], meas, platooninfo, dt, vehicle_object)
bestmse = math.inf
best = None
for guess in plist:
bfgs = sc.fmin_l_bfgs_b(cal.simulate, guess, bounds = bounds, approx_grad=1)
if bfgs[1] < bestmse:
best = bfgs
bestmse = bfgs[1]
if bestmse < cutoff:
break
return best
# can use this instead
def training_ga(veh_id_list, bounds, meas, platooninfo, dt, vehicle_object):
"""Runs differential evolution to fit parameters for a list of CalibrationVehicle's"""
#veh_id_list = list of float vehicle id, bounds = bounds for optimizer (list of tuples),
#vehicle_object = (possibly subclassed) CalibrationVehicle object
out = []
for veh_id in veh_id_list:
cal = hc.make_calibration([veh_id], meas, platooninfo, dt, vehicle_object)
ga = sc.differential_evolution(cal.simulate, bounds = bounds)
out.append(ga)
return out
"""
Run 1: IDM with no accident-free relax, no max speed bound, no acceleration bound (only for merge, lc)
"""
plist = [[40,1,1,3,10,25], [60,1,1,3,10,5], [80,1,15,1,1,35]]
bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20),(.1,75)]
relax_lc_res = []
relax_merge_res = []
for veh in lc_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, hc.CalibrationVehicle)
relax_lc_res.append(out)
for veh in merge_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, hc.CalibrationVehicle)
relax_merge_res.append(out)
with open('IDMrelax.pkl','wb') as f:
pickle.dump((relax_lc_res,relax_merge_res), f)
"""
Run 2: Like Run 1, but with relax disabled. (for all vehicles)
"""
#subclass calibrationvehicle as necessary
class NoRelaxIDM(hc.CalibrationVehicle):
def set_relax(self, *args):
pass
def initialize(self, parameters):
# initial conditions
self.lead = None
self.pos = self.initpos
self.speed = self.initspd
# reset relax
self.in_relax = False
self.relax = None
self.relax_start = None
# memory
self.leadmem = []
self.posmem = [self.pos]
self.speedmem = [self.speed]
self.relaxmem = []
# parameters
self.cf_parameters = parameters
plist = [[40,1,1,3,10], [60,1,1,3,10], [80,1,15,1,1]]
bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20)]
norelax_lc_res = []
norelax_merge_res = []
norelax_nolc_res = []
for veh in lc_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, NoRelaxIDM)
norelax_lc_res.append(out)
for veh in merge_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, NoRelaxIDM)
norelax_merge_res.append(out)
for veh in nolc_list[3:]:
out = training(veh,plist, bounds, meas, platooninfo, .1, NoRelaxIDM)
norelax_nolc_res.append(out)
with open('IDMnorelax.pkl','wb') as f:
pickle.dump((norelax_lc_res,norelax_merge_res,norelax_nolc_res),f)
"""
Run 3: OVM with no accident-free relax, no max speed bound, no acceleration bound (only for merge, lc)
"""
# make OVM calibrationvehicle
class OVMCalibrationVehicle(hc.CalibrationVehicle):
def cf_model(self, p, state):
return hm.OVM(p, state)
def get_cf(self, hd, spd, lead, curlane, timeind, dt, userelax):
if lead is None:
acc = curlane.call_downstream(self, timeind, dt)
else:
if self.in_relax:
# accident free formulation of relaxation
# ttc = hd / (self.speed - lead.speed)
# if ttc < 1.5 and ttc > 0:
if False: # disable accident free
temp = (ttc/1.5)**2
# currelax, currelax_v = self.relax[timeind-self.relax_start] # hd + v relax
# currelax, currelax_v = currelax*temp, currelax_v*temp
currelax = self.relax[timeind - self.relax_start]*temp
else:
# currelax, currelax_v = self.relax[timeind-self.relax_start]
currelax = self.relax[timeind - self.relax_start]
# acc = self.cf_model(self.cf_parameters, [hd + currelax, spd, lead.speed + currelax_v])
acc = self.cf_model(self.cf_parameters, [hd + currelax, spd, lead.speed])
else:
acc = self.cf_model(self.cf_parameters, [hd, spd, lead.speed])
return acc
def eqlfun(self, p, s):
return hm.OVM_eql(p, s)
def set_relax(self, relaxamounts, timeind, dt):
rp = self.relax_parameters
if rp is None:
return
relaxamount_s, relaxamount_v = relaxamounts
hs.relax_helper(rp, relaxamount_s, self, timeind, dt)
def initialize(self, parameters):
super().initialize(parameters)
self.maxspeed = parameters[0]*(1-math.tanh(-parameters[2]))
self.eql_type = 's' # you are supposed to set this in __init__
plist = [[10*3.3,.086/3.3, 1.545, 2, .175, 5 ], [20*3.3,.086/3.3/2, 1.545, .5, .175, 60 ], [10*3.3,.086/3.3/2, .5, .5, .175, 60 ]]
bounds = [(20,120),(.001,.1),(.1,2),(.1,5),(0,3), (.1,75)]
relax_lc_res_ovm = []
relax_merge_res_ovm = []
for veh in lc_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, OVMCalibrationVehicle)
relax_lc_res_ovm.append(out)
for veh in merge_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, OVMCalibrationVehicle)
relax_merge_res_ovm.append(out)
with open('OVMrelax.pkl', 'wb') as f:
pickle.dump((relax_lc_res_ovm, relax_merge_res_ovm),f)
"""
Run 4: Like Run 3, but with relax disabled. (for all vehicles)
"""
class NoRelaxOVM(OVMCalibrationVehicle):
def set_relax(self, *args):
pass
def initialize(self, parameters):
# initial conditions
self.lead = None
self.pos = self.initpos
self.speed = self.initspd
# reset relax
self.in_relax = False
self.relax = None
self.relax_start = None
# memory
self.leadmem = []
self.posmem = [self.pos]
self.speedmem = [self.speed]
self.relaxmem = []
# parameters
self.cf_parameters = parameters
plist = [[10*3.3,.086/3.3, 1.545, 2, .175 ], [20*3.3,.086/3.3/2, 1.545, .5, .175 ], [10*3.3,.086/3.3/2, .5, .5, .175 ]]
bounds = [(20,120),(.001,.1),(.1,2),(.1,5),(0,3)]
norelax_lc_res_ovm = []
norelax_merge_res_ovm = []
norelax_nolc_res_ovm = []
for veh in lc_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, NoRelaxOVM)
norelax_lc_res_ovm.append(out)
for veh in merge_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, NoRelaxOVM)
norelax_merge_res_ovm.append(out)
for veh in nolc_list:
out = training(veh,plist, bounds, meas, platooninfo, .1, NoRelaxOVM)
norelax_nolc_res_ovm.append(out)
with open('OVMnorelax.pkl', 'wb') as f:
pickle.dump((norelax_lc_res_ovm, norelax_merge_res_ovm, norelax_nolc_res_ovm),f)
|
py | b40abb99c4ac694d7f75bca847a6a981e264ce52 | """
Introduction
--------------
This python file contains the source code used to carry out the data preparation
process
Code
------
"""
# -*- coding: utf-8 -*-
import logging
import pandas as pd
from pathlib import Path
from datetime import datetime
import sqlite3
BASE_RAW_DATA_DIR = 'data/raw'
"""
str: Base raw data directory
"""
BASE_PROCESSED_DATA_DIR = 'data/processed'
"""
str: Base processed data directory
"""
GPU_CSV_FILE = BASE_RAW_DATA_DIR + '/gpu.csv'
"""
str: gpu.csv file location
"""
CHECK_CSV_FILE = BASE_RAW_DATA_DIR + '/application-checkpoints.csv'
"""
str: application-checkpoints.csv filename file location
"""
TASK_CSV_FILE = BASE_RAW_DATA_DIR + '/task-x-y.csv'
"""
str: task-x-y.csv file location
"""
PROCESSED_CSV_FILE = BASE_PROCESSED_DATA_DIR + '/processed.csv'
"""
str: processed.csv final dataset file location
"""
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
"""
str: string used to format timestamp for datetime conversion
"""
def timestamp_conv(df):
""" Converts a timestamp to datetime
Parameters
----------
df
dataframe to convert to datetime
    Returns
    -------
    pandas.core.series.Series
        converted timestamps
"""
df = df.apply(lambda x: (datetime.strptime(x, TIMESTAMP_FORMAT)))
return(df)
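# Illustrative (hypothetical sample value): a raw string such as
# '2018-11-08T07:41:27.242Z' parsed with TIMESTAMP_FORMAT yields
# datetime(2018, 11, 8, 7, 41, 27, 242000); timestamp_conv applies this
# conversion element-wise to a pandas Series of such strings.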
def clean_gpu(gpu_df):
"""Clean gpu dataframe by dropping uneeded serial number and
fixes timestamp format to datetime
Parameters
----------
gpu_df
gpu dataframe to clean
Returns
-------
pandas.core.frame.DataFrame
Cleaned GPU dataframe
"""
    # Drop unneeded serial column
gpu_df.drop(columns='gpuSerial', inplace=True)
gpu_df['timestamp'] = timestamp_conv(gpu_df['timestamp'])
return(gpu_df)
def merge_check_task(checkpoints_df, tasks_df):
"""merge (left join) checkpoints with task df through job and task id
Parameters
----------
checkpoints_df
application checkpoints dataframe to merge
tasks_df
tasks dataframe to merge
Returns
-------
pandas.core.frame.DataFrame
        Merged application checkpoints and tasks dataframe
"""
# Use left join on taskId and jobId
check_task_df = checkpoints_df.merge(tasks_df,
on=['taskId', 'jobId'], how='left')
return (check_task_df)
def clean_check_task(check_task_df):
"""Removes uneeded ids and fixes timestamp format to datetime
for merged application checkpoints and tasks df
Parameters
----------
check_task_df
merged application checkpoints and tasks df to clean
Returns
-------
pandas.core.frame.DataFrame
        Cleaned merged application checkpoints and tasks dataframe
"""
    # Drop unneeded ids
check_task_df.drop(columns= ['jobId', 'taskId'], inplace=True)
# Fix date format
check_task_df['timestamp'] = timestamp_conv(check_task_df['timestamp'])
return(check_task_df)
def merge_check_task_gpu(gpu_df, check_task_df):
"""merge (left join) gpu df with first merged df through host and timestamp
Parameters
----------
check_task_df
        application checkpoints and tasks merged dataframe to merge with gpu df
gpu_df
gpu dataframe to merge
Returns
-------
pandas.core.frame.DataFrame
        Merged dataframe with GPU measurements averaged per task event
"""
# Record start and stop times for events and drop old timestamps
check_task_df_start = check_task_df[
check_task_df['eventType'] == 'START']
check_task_df_stop = check_task_df[
check_task_df['eventType'] == 'STOP']
check_task_df_start.rename(
index=str, columns={"timestamp": "start_time"}, inplace = True)
check_task_df_stop.rename(
index=str, columns={"timestamp": "stop_time"}, inplace = True)
check_task_df_stop.drop('eventType', axis = 1, inplace = True)
check_task_df_start.drop('eventType', axis = 1, inplace = True)
# Make each field record start and stop combined
check_task_df = pd.merge( check_task_df_start, check_task_df_stop,
on=['hostname', 'eventName', 'x', 'y', 'level'])
# Remove any timestamps that occur out of the gpu dataset
check_task_df = check_task_df[
(check_task_df['start_time'] >= gpu_df['timestamp'][0]) &
(check_task_df['stop_time']
<= gpu_df['timestamp'][len(gpu_df)-1])]
    # Use sqlite to only combine with gpu if timestamp is between times
# connection to sql
conn = sqlite3.connect(':memory:')
# move dataframes to sql
check_task_df.to_sql('CheckTask', conn, index=False)
gpu_df.to_sql('Gpu', conn, index=False)
# SQL query
query = '''
SELECT *
FROM Gpu
LEFT JOIN CheckTask ON gpu.hostname = CheckTask.hostname
WHERE gpu.timestamp >= CheckTask.start_time
AND gpu.timestamp <= CheckTask.stop_time
'''
# get new df
merged_df = pd.read_sql_query(query, conn)
# drop duplicate hostname row (index 8)
merged_df = merged_df.loc[:,~merged_df.columns.duplicated()]
# group for averages (average stats for every task)
functions = {
'powerDrawWatt': 'mean', 'gpuTempC': 'mean',
'gpuUtilPerc': 'mean', 'gpuMemUtilPerc': 'mean',
'start_time': 'first', 'stop_time': 'first',
'gpuUUID' : 'first'}
merged_df = merged_df.groupby(
['hostname', 'eventName', 'x', 'y', 'level'],
as_index=False, sort=False
).agg(functions)
return(merged_df)
def main():
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
# Read datasets in
gpu_df = pd.read_csv(GPU_CSV_FILE)
checkpoints_df = pd.read_csv(CHECK_CSV_FILE)
tasks_df = pd.read_csv(TASK_CSV_FILE)
# Cleaning and merging process
gpu_df = clean_gpu(gpu_df)
check_task_df = merge_check_task(checkpoints_df, tasks_df)
check_task_df = clean_check_task(check_task_df)
check_task_gpu_df = merge_check_task_gpu(gpu_df, check_task_df)
# save final dataset
check_task_gpu_df.to_csv(PROCESSED_CSV_FILE)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
main()
|
py | b40abc1576141e90f453b596874d34f2f7082bd3 | import pytest
from iremember.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
|
py | b40abc2e02103bbc09e6407adafaa68277777f52 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package icon_lib
# An icon cache.
#
import sys
import os
from cal_path import get_icon_path
from win_lin import running_on_linux
from cal_path import get_image_file_path
from cal_path import gpvdm_paths
from gpvdm_local import gpvdm_local
try:
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QPainter,QIcon
except:
pass
icon_db=[]
use_theme=None
class icon:
def __init__(self):
self.name=[]
self.file_name=""
self.icon16x16=None
self.icon32x32=None
self.icon64x64=None
class icon_data_base:
def __init__(self):
self.db=[]
self.load()
def load(self):
data=gpvdm_local()
use_theme=data.gui_config.gui_use_icon_theme
path_16=os.path.join(get_image_file_path(),"16x16")
path_32=os.path.join(get_image_file_path(),"32x32")
path_64=os.path.join(get_image_file_path(),"64x64")
for f in os.listdir(path_32):
if f.endswith("png"):
my_icon=icon()
my_icon.name.append(f.split(".")[0])
my_icon.file_name=f.split(".")[0] #no ext
found=False
if running_on_linux()==True and use_theme==True:
image=QIcon()
if image.hasThemeIcon(my_icon.name[0])==True:
my_icon.icon16x16=image.fromTheme(my_icon.name[0])
my_icon.icon32x32=image.fromTheme(my_icon.name[0])
my_icon.icon64x64=image.fromTheme(my_icon.name[0])
found=True
if found==False:
my_icon.icon16x16=QIcon(os.path.join(path_16,my_icon.file_name+".png"))
my_icon.icon32x32=QIcon(os.path.join(path_32,my_icon.file_name+".png"))
my_icon.icon64x64=QIcon(os.path.join(path_64,my_icon.file_name+".png"))
self.db.append(my_icon)
for line in data.icon_lib.var_list:
for i in range(0,len(self.db)):
if line[1] == self.db[i].file_name:
self.db[i].name.append(line[0])
def dump(self):
for i in range(0,len(self.db)):
print(self.db[i].name,self.db[i].file_name)
def icon_get(self,token,size=-1):
for i in range(0,len(self.db)):
if token in self.db[i].name:
if size==16:
return self.db[i].icon16x16
elif size==32:
return self.db[i].icon32x32
elif size==64 or size==-1:
return self.db[i].icon64x64
return False
def icon_init_db():
global icon_db
icon_db=icon_data_base()
def icon_get_db():
global icon_db
return icon_db
def icon_get(token,size=-1,sub_icon=None):
global icon_db
if token!=".png" and token.endswith(".png")==True:
token=token[:-4]
if sub_icon==None:
return icon_db.icon_get(token,size=size)
icon_ret=icon_db.icon_get(token)
if icon_ret!=False:
if sub_icon!=None:
icon1=icon_ret
icon2=icon_db.icon_get(sub_icon)
icon1_pixmap=icon1.pixmap(QSize(48,48))
icon2_small = icon2.pixmap(QSize(48,48)).scaled(QSize(24,24), Qt.KeepAspectRatio, Qt.SmoothTransformation);
p=QPainter(icon1_pixmap)
p.drawPixmap(24,24,icon2_small);
p.end()
icon_ret=QIcon(icon1_pixmap)
return icon_ret
else:
return False
|
py | b40abd7de150fe406d2c96147a45b6a033140f04 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Tea Sample')
class TestTeaSample(unittest.TestCase):
pass
|
py | b40abddbb6091305095b0b3f9761b7a58b5d4e4a | import json
import pickle
from dataclasses import fields
from meru.actions import Action
from meru.base import MeruObject
from meru.constants import MERU_SERIALIZATION_METHOD
from meru.exceptions import ActionException
from meru.introspection import get_subclasses
def serialize_objects(obj):
if hasattr(obj, "to_dict"):
data = obj.to_dict()
else:
data = obj.__dict__
return data
def deserialize_objects(obj):
if "object_type" in obj.keys():
subclass = get_subclasses(MeruObject)[obj["object_type"]]
if subclass:
calling_args = []
for field in fields(subclass):
if not field.init:
continue
cast_to = field.metadata.get("cast", None)
if cast_to:
calling_args.append(cast_to(obj[field.name]))
else:
calling_args.append(obj[field.name])
action = subclass(*calling_args)
# Force timestamp to be added correctly to Actions.
# Timestamp can not be found with getfullargsspec, since
# it can not be in __init__.
# see: https://bugs.python.org/issue36077
if isinstance(action, Action):
action.timestamp = obj["timestamp"]
return action
raise ActionException(f'Object {obj["object_type"]} not found.')
return obj
def encode_object(action: any, method_override=None):
if "json" in (MERU_SERIALIZATION_METHOD, method_override):
encoded_object = json.dumps(action, default=serialize_objects).encode()
else:
encoded_object = pickle.dumps(action)
return encoded_object
def decode_object(action: any, method_override=None):
if "json" in (MERU_SERIALIZATION_METHOD, method_override):
data = json.loads(action, object_hook=deserialize_objects)
else:
data = pickle.loads(action)
return data
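# Illustrative round trip (assuming `action` is a concrete meru Action or
# MeruObject instance): decode_object(encode_object(action)) should rebuild an
# equivalent object, via JSON when MERU_SERIALIZATION_METHOD is "json" and via
# pickle otherwise; method_override forces one codec for a single call.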
|
py | b40abe89748b1fc4016498183f04affa4b0a7988 | import re, sublime
class Tag():
def __init__(self):
Tag.regexp_is_valid = re.compile("^[a-z0-9#\:\-_]+$", re.I);
Tag.regexp_self_closing_optional = re.compile("^<?(\?xml|\!|area|base|br|col|frame|hr|img|input|link|meta|param|command|embed|source|/?li|/?p)[^a-z]", re.I);
Tag.regexp_self_closing = re.compile("^<?(\?xml|\!|area|base|br|col|frame|hr|img|input|link|meta|param|command|embed|source)[^a-z]", re.I);
Tag.regexp_self_closing_xml = re.compile("^<?(\?xml|\!)[^a-z]", re.I);
Tag.regexp_is_closing = re.compile("^<?[^><]+/>", re.I);
Tag.xml_files = [item.lower() for item in ['xhtml', 'xml', 'rdf', 'xul', 'svg', 'xsd', 'xslt','tmTheme', 'tmPreferences', 'tmLanguage', 'sublime-snippet', 'opf', 'ncx']]
def is_valid(self, content):
return Tag.regexp_is_valid.match(content)
def is_self_closing(self, content, return_optional_tags = True, is_xml= False):
if return_optional_tags:
if is_xml == False:
return Tag.regexp_self_closing.match(content) or Tag.regexp_is_closing.match(content)
else:
return Tag.regexp_is_closing.match(content) or Tag.regexp_self_closing_xml.match(content)
else:
if is_xml == False:
return Tag.regexp_self_closing_optional.match(content) or Tag.regexp_is_closing.match(content)
else:
return Tag.regexp_is_closing.match(content) or Tag.regexp_self_closing_xml.match(content)
def name(self, content, return_optional_tags = True, is_xml = False):
if content[:1] == '/':
tag_name = content.split('/')[1].split('>')[0];
else:
tag_name = content.split(' ')[0].split('>')[0];
if self.is_valid(tag_name) and not self.is_self_closing(content, return_optional_tags, is_xml):
return tag_name
else:
return ''
def is_closing(self, content):
if content[:1] == '/' or Tag.regexp_is_closing.match(content):
return True
else:
return False
def view_is_xml(self, view):
if view.settings().get('is_xml'):
return True
else:
name = view.file_name()
if not name:
is_xml = '<?xml' in view.substr(sublime.Region(0, 50))
else:
name = ('name.'+name).split('.')
name.reverse()
name = name.pop(0).lower()
is_xml = name in Tag.xml_files or '<?xml' in view.substr(sublime.Region(0, 50))
view.settings().set('is_xml', is_xml)
return is_xml
def clean_html(self, content):
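        # Comments, scripts and styles are replaced with runs of '.' of equal length,
        # so character offsets into the original content are preserved.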
# normalize
content = content.replace('\r', '\n').replace('\t', ' ')
# comments
unparseable = content.split('<!--')
content = unparseable.pop(0)
l = len(unparseable)
i = 0
while i < l:
tmp = unparseable[i].split('-->')
content += '....'
content += len(tmp.pop(0))*'.'
content += '...'
content += "...".join(tmp)
i += 1
unparseable = content.split('/*')
content = unparseable.pop(0)
l = len(unparseable)
i = 0
while i < l:
tmp = unparseable[i].split('*/')
content += '..'
content += len(tmp.pop(0))*'.'
content += '..'
content += "..".join(tmp)
i += 1
unparseable = re.split('(\s\/\/[^\n]+\n)', content)
for comment in unparseable:
if comment[:3] == '\n//' or comment[:3] == ' //':
content = content.replace(comment, (len(comment))*'.')
unparseable = re.split('(\s\#[^\n]+\n)', content)
for comment in unparseable:
if comment[:3] == '\n#' or comment[:3] == ' #':
content = content.replace(comment, (len(comment))*'.')
# script
unparseable = content.split('<script')
content = unparseable.pop(0)
l = len(unparseable)
i = 0
while i < l:
tmp = unparseable[i].split('</script>')
content += '.......'
content += len(tmp.pop(0))*'.'
content += '.........'
content += ".........".join(tmp)
i += 1
# style
unparseable = content.split('<style')
content = unparseable.pop(0)
l = len(unparseable)
i = 0
while i < l:
tmp = unparseable[i].split('</style>')
content += '......'
content += len(tmp.pop(0))*'.'
content += '........'
content += "........".join(tmp)
i += 1
return content
|
py | b40abf5c6fe89301f7bc01f7639a2d4a8ab4d95f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('welcome', '0019_auto_20170420_1518'),
]
operations = [
migrations.AddField(
model_name='articlerely',
name='photo',
field=models.CharField(max_length=100, null=True, verbose_name=b'\xe9\x9a\x8f\xe6\x9c\xba\xe5\xa4\xb4\xe5\x83\x8f', blank=True),
),
migrations.AlterField(
model_name='articlerely',
name='email',
field=models.CharField(max_length=80, null=True, verbose_name=b'\xe8\xaf\x84\xe8\xae\xba\xe8\x80\x85\xe9\x82\xae\xe7\xae\xb1', blank=True),
),
]
|
py | b40abf9638852034807055f9b4ebfff0d5f1a682 | import functools
from gym_gridverse.action import Action
from gym_gridverse.envs.reward_functions import reward_function_registry
from gym_gridverse.state import State
@reward_function_registry.register
def generalized_static(
state: State,
action: Action,
next_state: State,
*,
reward_if_static: float = -1.0,
reward_if_not_static: float = 0.0,
) -> float:
"""determines reward depending on whether state is unchanged"""
return reward_if_static if state == next_state else reward_if_not_static
# binding two variants of generalized_static_reward
stronger_static = functools.partial(
generalized_static,
reward_if_static=-2.0,
reward_if_not_static=1.0,
)
weaker_static = functools.partial(
generalized_static,
reward_if_static=-0.2,
reward_if_not_static=0.1,
)
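# For example, stronger_static(state, action, next_state) yields -2.0 when the
# state is unchanged and 1.0 otherwise.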
|
py | b40abf9e82bd9f0244eb5671dd9f60d5b6e6a4f9 | # -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.gis.geos import LineString, Point
from django.conf import settings
from geotrek.common.utils import almostequal
from geotrek.core.factories import PathFactory, TopologyFactory, NetworkFactory, UsageFactory
from geotrek.core.models import Path, Topology
class SplitPathTest(TestCase):
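    # These tests check that creating or updating a path crossing an existing one
    # splits both geometries at the intersection and keeps attributes consistent.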
def test_split_attributes(self):
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
ab.networks.add(NetworkFactory.create())
ab.usages.add(UsageFactory.create())
PathFactory.create(geom=LineString((2, 0), (2, 2)))
ab_2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
self.assertEqual(ab.source, ab_2.source)
self.assertEqual(ab.stake, ab_2.stake)
self.assertListEqual(list(ab.networks.all()), list(ab_2.networks.all()))
self.assertListEqual(list(ab.usages.all()), list(ab_2.usages.all()))
def test_split_tee_1(self):
"""
C
A +----+----+ B
|
+ AB exists. Add CD.
D
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
self.assertEqual(ab.length, 4)
cd = PathFactory.create(geom=LineString((2, 0), (2, 2)))
self.assertEqual(cd.length, 2)
# Make sure AB was split :
ab.reload()
self.assertEqual(ab.geom, LineString((0, 0), (2, 0)))
self.assertEqual(ab.length, 2) # Length was also updated
# And a clone of AB was created
clones = Path.objects.filter(name="AB").exclude(pk=ab.pk)
self.assertEqual(len(clones), 1)
ab_2 = clones[0]
self.assertEqual(ab_2.geom, LineString((2, 0), (4, 0)))
self.assertEqual(ab_2.length, 2) # Length was also updated
def test_split_tee_2(self):
"""
CD exists. Add AB.
"""
cd = PathFactory.create(geom=LineString((2, 0), (2, 2)))
self.assertEqual(cd.length, 2)
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Make sure AB was split :
self.assertEqual(ab.geom, LineString((0, 0), (2, 0)))
self.assertEqual(ab.length, 2) # Length was also updated
clones = Path.objects.filter(name="AB").exclude(pk=ab.pk)
ab_2 = clones[0]
self.assertEqual(ab_2.geom, LineString((2, 0), (4, 0)))
self.assertEqual(ab_2.length, 2) # Length was also updated
def test_split_cross(self):
"""
C
+
|
A +----+----+ B
|
+ AB exists. Add CD.
D
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((2, -2), (2, 2)))
ab.reload()
ab_2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
cd_2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(ab.geom, LineString((0, 0), (2, 0)))
self.assertEqual(cd.geom, LineString((2, -2), (2, 0)))
self.assertEqual(ab_2.geom, LineString((2, 0), (4, 0)))
self.assertEqual(cd_2.geom, LineString((2, 0), (2, 2)))
def test_split_cross_on_deleted(self):
"""
        Paths should not be split if they cross deleted paths.
(attribute delete=True)
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
self.assertEqual(len(Path.objects.all()), 1)
ab.delete()
self.assertEqual(len(Path.objects.all()), 0)
PathFactory.create(name="CD", geom=LineString((2, -2), (2, 2)))
self.assertEqual(len(Path.objects.all()), 1)
def test_split_on_update(self):
"""
+ E
:
A +----+----+ B A +----+----+ B
:
C +----+ D C +----+ D
AB and CD exist. CD updated into CE.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, -2), (2, -2)))
self.assertEqual(ab.length, 4)
self.assertEqual(cd.length, 2)
cd.geom = LineString((0, -2), (2, -2), (2, 2))
cd.save()
ab.reload()
self.assertEqual(ab.length, 2)
self.assertEqual(cd.length, 4)
ab_2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
cd_2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(ab_2.length, 2)
self.assertEqual(cd_2.length, 2)
def test_split_twice(self):
"""
C D
+ +
| |
A +--+---+--+ B
| |
+---+
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((1, 2), (1, -2),
(3, -2), (3, 2)))
ab.reload()
self.assertEqual(ab.length, 1)
self.assertEqual(cd.length, 2)
ab_clones = Path.objects.filter(name="AB").exclude(pk=ab.pk)
cd_clones = Path.objects.filter(name="CD").exclude(pk=cd.pk)
self.assertEqual(len(ab_clones), 2)
self.assertEqual(len(cd_clones), 2)
# Depending on PostgreSQL fetch order
if ab_clones[0].geom == LineString((1, 0), (3, 0)):
self.assertEqual(ab_clones[0].geom, LineString((1, 0), (3, 0)))
self.assertEqual(ab_clones[1].geom, LineString((3, 0), (4, 0)))
else:
self.assertEqual(ab_clones[0].geom, LineString((3, 0), (4, 0)))
self.assertEqual(ab_clones[1].geom, LineString((1, 0), (3, 0)))
if cd_clones[0].geom == LineString((3, 0), (3, 2)):
self.assertEqual(cd_clones[0].geom, LineString((3, 0), (3, 2)))
self.assertEqual(cd_clones[1].geom, LineString((1, 0), (1, -2),
(3, -2), (3, 0)))
else:
self.assertEqual(cd_clones[0].geom, LineString((1, 0), (1, -2),
(3, -2), (3, 0)))
self.assertEqual(cd_clones[1].geom, LineString((3, 0), (3, 2)))
def test_add_shortest_path(self):
"""
A +---- -----+ C
\ /
\ /
--+--
B
D E
A +---+---------+---+ C
\ /
\ /
--+--
B
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0),
(6, -2), (8, -2)))
cb = PathFactory.create(name="CB", geom=LineString((14, 0), (12, 0),
(10, -2), (8, -2)))
de = PathFactory.create(name="DE", geom=LineString((4, 0), (12, 0)))
# Paths were split, there are 5 now
self.assertEqual(len(Path.objects.all()), 5)
ab.reload()
cb.reload()
de.reload()
ab_2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
cb_2 = Path.objects.filter(name="CB").exclude(pk=cb.pk)[0]
self.assertEqual(de.geom, LineString((4, 0), (12, 0)))
self.assertEqual(ab.geom, LineString((0, 0), (4, 0)))
self.assertEqual(ab_2.geom, LineString((4, 0), (6, -2), (8, -2)))
self.assertEqual(cb.geom, LineString((14, 0), (12, 0)))
self.assertEqual(cb_2.geom, LineString((12, 0), (10, -2), (8, -2)))
def test_split_almost(self):
"""
C D
+ +
\ /
A +--V--+ B
E
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((1, 1), (2, -0.2),
(3, 1)))
ab.reload()
cd.reload()
eb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
ed = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(ab.geom, LineString((0, 0), (2, -0.2)))
self.assertEqual(cd.geom, LineString((1, 1), (2, -0.2)))
self.assertEqual(eb.geom, LineString((2, -0.2), (4, 0)))
self.assertEqual(ed.geom, LineString((2, -0.2), (3, 1)))
def test_split_almost_2(self):
"""
+ C
|
A +------- ... ----+ B
|
+ D
"""
cd = PathFactory.create(name="CD", geom=LineString((0.1, 1), (0.1, -1)))
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (10000000, 0)))
ab.reload()
cd.reload()
self.assertEqual(ab.geom, LineString((0.1, 0), (10000000, 0)))
self.assertEqual(cd.geom, LineString((0.1, 1), (0.1, 0)))
self.assertEqual(len(Path.objects.all()), 3)
def test_split_almost_3(self):
"""
+ C
|
A +-+------ ... ----+ B
|
+ D
"""
cd = PathFactory.create(name="CD", geom=LineString((1.1, 1), (1.1, -1)))
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (10000000, 0)))
ab.reload()
cd.reload()
self.assertEqual(ab.geom, LineString((0, 0), (1.1, 0)))
self.assertEqual(cd.geom, LineString((1.1, 1), (1.1, 0)))
self.assertEqual(len(Path.objects.all()), 4)
def test_split_almost_4(self):
"""
C
-----+----+ A
| |
| |
-----+----+ B
D
"""
ab = PathFactory.create(name="AB", geom=LineString((998522.520690918, 6381896.4595642),
(997785.990158081, 6381124.21846007),
(998272.546691896, 6380561.77696227),
(999629.548400879, 6381209.03106688)))
cd = PathFactory.create(name="CD", geom=LineString((998522.520690918, 6381896.4595642),
(999098.044800479, 6380955.51783641)))
ab.reload()
cd.reload()
self.assertEqual(len(Path.objects.all()), 3)
def test_split_multiple(self):
"""
C E G I
+ + + +
| | | |
A +--+---+---+---+--+ B
| | | |
+ + + +
D F H J
"""
PathFactory.create(name="CD", geom=LineString((1, -2), (1, 2)))
PathFactory.create(name="EF", geom=LineString((2, -2), (2, 2)))
PathFactory.create(name="GH", geom=LineString((3, -2), (3, 2)))
PathFactory.create(name="IJ", geom=LineString((4, -2), (4, 2)))
PathFactory.create(name="AB", geom=LineString((0, 0), (5, 0)))
self.assertEqual(len(Path.objects.filter(name="CD")), 2)
self.assertEqual(len(Path.objects.filter(name="EF")), 2)
self.assertEqual(len(Path.objects.filter(name="GH")), 2)
self.assertEqual(len(Path.objects.filter(name="IJ")), 2)
self.assertEqual(len(Path.objects.filter(name="AB")), 5)
def test_split_multiple_2(self):
"""
C E G I
+ + + +
| | | |
| | | |
A +--+---+---+---+--+ B
D F H J
"""
PathFactory.create(name="CD", geom=LineString((1, -2), (1, 2)))
PathFactory.create(name="EF", geom=LineString((2, -2), (2, 2)))
PathFactory.create(name="GH", geom=LineString((3, -2), (3, 2)))
PathFactory.create(name="IJ", geom=LineString((4, -2), (4, 2)))
PathFactory.create(name="AB", geom=LineString((0, -2), (5, -2)))
self.assertEqual(len(Path.objects.filter(name="CD")), 1)
self.assertEqual(len(Path.objects.filter(name="EF")), 1)
self.assertEqual(len(Path.objects.filter(name="GH")), 1)
self.assertEqual(len(Path.objects.filter(name="IJ")), 1)
self.assertEqual(len(Path.objects.filter(name="AB")), 5)
def test_split_multiple_3(self):
"""
+ +
E \ / F
A +---+--+--------+--+---+ B
| \ / | AB exists. Create EF. Create CD.
+----+----+----+
\ /
\/
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (10, 0)))
PathFactory.create(name="EF", geom=LineString((2, 0), (2, -1), (8, -1), (8, 0)))
PathFactory.create(name="CD", geom=LineString((2, 1), (5, -2), (8, 1)))
self.assertEqual(len(Path.objects.filter(name="AB")), 5)
self.assertEqual(len(Path.objects.filter(name="EF")), 3)
self.assertEqual(len(Path.objects.filter(name="CD")), 5)
def test_split_multiple_4(self):
"""
Same as previous, without round values for intersections.
C D
+ +
E \ / F
A +---+--+--------+--+---+ B
\ \ / / AB exists. Create EF. Create CD.
\ \ / /
---+--+---
\/
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (10, 0)))
PathFactory.create(name="EF", geom=LineString((2, 0), (2, -1), (8, -1), (8, 0)))
PathFactory.create(name="CD", geom=LineString((2, 1), (5, -2), (8, 1)))
PathFactory.create(name="CD", geom=LineString((3, 1), (5, -2), (7, 1)))
self.assertEqual(len(Path.objects.filter(name="AB")), 5)
self.assertEqual(len(Path.objects.filter(name="EF")), 3)
class SplitPathLineTopologyTest(TestCase):
def test_split_tee_1(self):
"""
C
A +---===+===---+ B
A' | B'
+ AB exists with topology A'B'.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.25, end=0.75)
topogeom = topology.geom
# Topology covers 1 path
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(name="CD", geom=LineString((2, 0), (2, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
# Topology now covers 2 paths
self.assertEqual(len(topology.paths.all()), 2)
# AB and AB2 has one topology each
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 1)
# Topology position became proportional
aggr_ab = ab.aggregations.all()[0]
aggr_cb = cb.aggregations.all()[0]
self.assertEqual((0.5, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.0, 0.5), (aggr_cb.start_position, aggr_cb.end_position))
topology.reload()
self.assertNotEqual(topology.geom, topogeom)
self.assertEqual(topology.geom.coords[0], topogeom.coords[0])
self.assertEqual(topology.geom.coords[-1], topogeom.coords[-1])
def test_split_tee_1_reversed(self):
"""
C
A +---===+===---+ B
A' | B'
+ AB exists with topology A'B'.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.75, end=0.25, order=1)
# Topology covers 1 path
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(name="CD", geom=LineString((2, 0), (2, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
# Topology now covers 2 paths
self.assertEqual(len(topology.paths.all()), 2)
# AB and AB2 has one topology each
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 1)
# Topology position became proportional
aggr_ab = ab.aggregations.all()[0]
aggr_cb = cb.aggregations.all()[0]
self.assertEqual((1.0, 0.5), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.5, 0.0), (aggr_cb.start_position, aggr_cb.end_position))
topology.reload()
self.assertEqual(topology.geom, LineString((3.0, 0.0, 0.0), (2.0, 0.0, 0.0), (1.0, 0.0, 0.0)))
def test_split_tee_2(self):
"""
C
A +---+---=====--+ B
| A' B'
+ AB exists with topology A'B'.
D Add CD
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.5, end=0.75)
topogeom = topology.geom
# Topology covers 1 path
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(topology.paths.all()[0], ab)
PathFactory.create(name="CD", geom=LineString((1, 0), (1, 2)))
# CB was just created
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
# AB has no topology anymore
self.assertEqual(len(ab.aggregations.all()), 0)
# Topology now still covers 1 path, but the new one
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 1)
self.assertEqual(topology.paths.all()[0].pk, cb.pk)
topology.reload()
self.assertEqual(topology.geom, topogeom)
def test_split_tee_2_reversed(self):
"""
C
A +---+---=====--+ B
| A' B'
+ AB exists with topology A'B'.
D Add CD
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.75, end=0.5)
topogeom = topology.geom
# Topology covers 1 path
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(topology.paths.all()[0], ab)
PathFactory.create(name="CD", geom=LineString((1, 0), (1, 2)))
# CB was just created
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
# AB has no topology anymore
self.assertEqual(len(ab.aggregations.all()), 0)
# Topology now still covers 1 path, but the new one
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 1)
self.assertEqual(topology.paths.all()[0].pk, cb.pk)
topology.reload()
self.assertEqual(topology.geom, topogeom)
def test_split_tee_3(self):
"""
C
A +--=====--+---+ B
A' B' |
+ AB exists with topology A'B'.
D Add CD
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.3, end=0.6)
topogeom = topology.geom
# Topology covers 1 path
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(topology.paths.all()[0], ab)
PathFactory.create(name="CD", geom=LineString((3, 0), (3, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
# CB does not have any
self.assertEqual(len(cb.aggregations.all()), 0)
# AB has still its topology
self.assertEqual(len(ab.aggregations.all()), 1)
# But start/end have changed
aggr_ab = ab.aggregations.all()[0]
self.assertEqual((0.4, 0.8), (aggr_ab.start_position, aggr_ab.end_position))
topology.reload()
self.assertEqual(topology.geom, topogeom)
def test_split_tee_3_reversed(self):
"""
C
A +--=====--+---+ B
A' B' |
+ AB exists with topology A'B'.
D Add CD
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.45, end=0.15)
# Topology covers 1 path
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(topology.paths.all()[0], ab)
PathFactory.create(name="CD", geom=LineString((3, 0), (3, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
# CB does not have any
self.assertEqual(len(cb.aggregations.all()), 0)
# AB has still its topology
self.assertEqual(len(ab.aggregations.all()), 1)
# But start/end have changed
aggr_ab = ab.aggregations.all()[0]
self.assertEqual((0.6, 0.2), (aggr_ab.start_position, aggr_ab.end_position))
topology.reload()
self.assertEqual(topology.geom, LineString((1.7999999999999998, 0.0, 0.0), (0.5999999999999996, 0.0, 0.0)))
def test_split_tee_4(self):
"""
B C E
A +--===+===+===+===--+ F
|
+ AB, BE, EF exist. A topology exists along them.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (2, 0)))
be = PathFactory.create(name="BE", geom=LineString((2, 0), (4, 0)))
ef = PathFactory.create(name="EF", geom=LineString((4, 0), (6, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.5, end=1)
topology.add_path(be, start=0, end=1)
topology.add_path(ef, start=0.0, end=0.5)
topogeom = topology.geom
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(be.aggregations.all()), 1)
self.assertEqual(len(ef.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 3)
# Create CD
PathFactory.create(name="CD", geom=LineString((3, 0), (3, 2)))
# Topology now covers 4 paths
self.assertEqual(len(topology.paths.all()), 4)
# AB and EF have still their topology
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(ef.aggregations.all()), 1)
# BE and CE have one topology from 0.0 to 1.0
bc = Path.objects.filter(pk=be.pk)[0]
ce = Path.objects.filter(name="BE").exclude(pk=be.pk)[0]
self.assertEqual(len(bc.aggregations.all()), 1)
self.assertEqual(len(ce.aggregations.all()), 1)
aggr_bc = bc.aggregations.all()[0]
aggr_ce = ce.aggregations.all()[0]
self.assertEqual((0.0, 1.0), (aggr_bc.start_position, aggr_bc.end_position))
self.assertEqual((0.0, 1.0), (aggr_ce.start_position, aggr_ce.end_position))
topology.reload()
self.assertEqual(len(topology.aggregations.all()), 4)
# Geometry has changed
self.assertNotEqual(topology.geom, topogeom)
# But extremities are equal
self.assertEqual(topology.geom.coords[0], topogeom.coords[0])
self.assertEqual(topology.geom.coords[-1], topogeom.coords[-1])
def test_split_tee_4_reversed(self):
"""
B C E
A +--===+===+===+===--+ F
|
+ AB, BE, EF exist. A topology exists along them.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (2, 0)))
be = PathFactory.create(name="BE", geom=LineString((4, 0), (2, 0)))
ef = PathFactory.create(name="EF", geom=LineString((4, 0), (6, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.5, end=1)
topology.add_path(be, start=1, end=0)
topology.add_path(ef, start=0.0, end=0.5)
# Create DC
PathFactory.create(name="DC", geom=LineString((3, 0), (3, 2)))
# Topology now covers 4 paths
topology.reload()
self.assertEqual(len(topology.paths.all()), 4)
# BE and CE have one topology from 0.0 to 1.0
bc = Path.objects.filter(pk=be.pk)[0]
ce = Path.objects.filter(name="BE").exclude(pk=be.pk)[0]
aggr_ab = ab.aggregations.all()[0]
aggr_bc = bc.aggregations.all()[0]
aggr_ce = ce.aggregations.all()[0]
aggr_ef = ef.aggregations.all()[0]
self.assertEqual((0.5, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((1.0, 0.0), (aggr_bc.start_position, aggr_bc.end_position))
self.assertEqual((1.0, 0.0), (aggr_ce.start_position, aggr_ce.end_position))
self.assertEqual((0.0, 0.5), (aggr_ef.start_position, aggr_ef.end_position))
topology.reload()
self.assertEqual(len(topology.aggregations.all()), 4)
# Geometry has changed
self.assertEqual(topology.geom, LineString((1.0, 0.0, 0.0), (2.0, 0.0, 0.0),
(3.0, 0.0, 0.0), (4.0, 0.0, 0.0),
(5.0, 0.0, 0.0)))
def test_split_twice(self):
"""
C D
+ +
| |
A +--==+===+==--+ B
| |
+---+
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.1, end=0.9)
topogeom = topology.geom
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(name="CD", geom=LineString((1, 2), (1, -2),
(3, -2), (3, 2)))
self.assertEqual(len(topology.paths.all()), 3)
self.assertEqual(len(ab.aggregations.all()), 1)
aggr_ab = ab.aggregations.all()[0]
self.assertEqual((0.4, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
ab2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
ab3 = Path.objects.filter(name="AB").exclude(pk__in=[ab.pk, ab2.pk])[0]
if ab2.geom.length < ab3.geom.length:
ab2, ab3 = ab3, ab2
aggr_ab2 = ab2.aggregations.all()[0]
aggr_ab3 = ab3.aggregations.all()[0]
self.assertEqual((0.0, 1.0), (aggr_ab2.start_position, aggr_ab2.end_position))
self.assertEqual((0.0, 0.6), (aggr_ab3.start_position, aggr_ab3.end_position))
topology.reload()
self.assertNotEqual(topology.geom, topogeom)
self.assertEqual(topology.geom.coords[0], topogeom.coords[0])
self.assertEqual(topology.geom.coords[-1], topogeom.coords[-1])
def test_split_twice_reversed(self):
"""
C D
+ +
| |
A +--==+===+==--+ B
| |
+---+
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.9, end=0.1, order=1)
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(name="CD", geom=LineString((1, 2), (1, -2),
(3, -2), (3, 2)))
self.assertEqual(len(topology.paths.all()), 3)
self.assertEqual(len(ab.aggregations.all()), 1)
aggr_ab = ab.aggregations.all()[0]
self.assertEqual((1.0, 0.4), (aggr_ab.start_position, aggr_ab.end_position))
ab2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
ab3 = Path.objects.filter(name="AB").exclude(pk__in=[ab.pk, ab2.pk])[0]
aggr_ab2 = ab2.aggregations.all()[0]
aggr_ab3 = ab3.aggregations.all()[0]
if aggr_ab2.start_position == 1.0:
self.assertEqual((1.0, 0.0), (aggr_ab2.start_position, aggr_ab2.end_position))
self.assertEqual((0.6, 0.0), (aggr_ab3.start_position, aggr_ab3.end_position))
else:
            # Depending on PostgreSQL fetch order, `ab2` is actually `ab3`
self.assertEqual((1.0, 0.0), (aggr_ab3.start_position, aggr_ab3.end_position))
self.assertEqual((0.6, 0.0), (aggr_ab2.start_position, aggr_ab2.end_position))
topology.reload()
self.assertEqual(topology.geom, LineString((3.6000000000000001, 0), (3, 0),
(1.0, 0.0), (0.4, 0.0)))
def test_split_on_update(self):
""" + E
:
||
A +-----------+ B A +----++---+ B
||
C +-====-+ D C +--===+ D
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, -1), (4, -1)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.3, end=0.9)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((0, -1), (2, -1), (2, 2))
cd.save()
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(topology.paths.all()), 2)
self.assertEqual(len(cd.aggregations.all()), 1)
self.assertEqual(len(cd2.aggregations.all()), 1)
aggr_cd = cd.aggregations.all()[0]
aggr_cd2 = cd2.aggregations.all()[0]
self.assertEqual((0.5, 1.0), (aggr_cd.start_position, aggr_cd.end_position))
self.assertEqual((0.0, 0.75), (aggr_cd2.start_position, aggr_cd2.end_position))
def test_split_on_update_2(self):
""" + E
:
:
A +-----------+ B A +-----+---+ B
:
C +-==------+ D C +--===+ D
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, -1), (4, -1)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.15, end=0.3)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((0, -1), (2, -1), (2, 2))
cd.save()
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 1)
self.assertEqual(len(cd2.aggregations.all()), 0)
aggr_cd = cd.aggregations.all()[0]
self.assertEqual((0.25, 0.5), (aggr_cd.start_position, aggr_cd.end_position))
def test_split_on_update_3(self):
""" + E
||
||
A +-----------+ B A +-----+---+ B
:
C +------==-+ D C +-----+ D
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, -1), (4, -1)))
# Create a topology
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.7, end=0.85)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((0, -1), (2, -1), (2, 2))
cd.save()
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 0)
self.assertEqual(len(cd2.aggregations.all()), 1)
aggr_cd2 = cd2.aggregations.all()[0]
self.assertEqual((0.25, 0.625), (aggr_cd2.start_position, aggr_cd2.end_position))
def test_split_on_return_topology(self):
"""
A B C D
+-------+-------+-------+
>=================+
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
bc = PathFactory.create(name="BC", geom=LineString((4, 0), (8, 0)))
cd = PathFactory.create(name="CD", geom=LineString((8, 0), (12, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.5, end=1, order=1)
topology.add_path(bc, start=0, end=1, order=2)
topology.add_path(cd, start=0.0, end=0.5, order=3)
topology.add_path(cd, start=0.5, end=0.5, order=4)
topology.add_path(cd, start=0.5, end=0.0, order=5)
topology.add_path(bc, start=1, end=0, order=6)
topology.add_path(ab, start=1, end=0.5, order=7)
self.assertEqual(len(topology.aggregations.all()), 7)
topogeom = topology.geom
PathFactory.create(name="split", geom=LineString((9, -1), (9, 1)))
topology.reload()
self.assertItemsEqual(topology.aggregations.order_by('order').values_list('order', 'path__name'),
[(1, 'AB'), (2, 'BC'), (3, 'CD'), (3, 'CD'), (4, 'CD'),
(5, 'CD'), (5, 'CD'), (6, 'BC'), (7, 'AB')])
self.assertTrue(topology.geom.equals(topogeom))
def test_split_on_topology_with_offset(self):
"""
A B
+---------------+
>=======+
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
topology = TopologyFactory.create(no_path=True, offset=1)
topology.add_path(ab, start=0.25, end=0.75, order=1)
self.assertEqual(len(topology.aggregations.all()), 1)
topogeom = topology.geom
PathFactory.create(name="split", geom=LineString((2, -2), (2, 2)))
topology.reload()
self.assertItemsEqual(topology.aggregations.order_by('order').values_list('order', 'path__name'),
[(1, 'AB'), (1, 'AB')])
self.assertTrue(topology.geom.equals(topogeom))
def test_split_on_topology_with_offset_and_point(self):
"""
A B
+---------------+
>=======+
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (5, 0)))
topology = TopologyFactory.create(no_path=True, offset=1)
topology.add_path(ab, start=0.2, end=0.6, order=1)
topology.add_path(ab, start=0.6, end=0.6, order=2)
topology.add_path(ab, start=0.6, end=0.8, order=3)
self.assertEqual(len(topology.aggregations.all()), 3)
topogeom = topology.geom
PathFactory.create(name="split", geom=LineString((2, -2), (2, 2)))
topology.reload()
self.assertItemsEqual(topology.aggregations.order_by('order').values_list('order', 'path__name'),
[(1, 'AB'), (1, 'AB'), (2, 'AB'), (3, 'AB')])
self.assertTrue(topology.geom.equals(topogeom))
class SplitPathPointTopologyTest(TestCase):
def test_split_tee_1(self):
"""
C
A +-----X----+ B
|
+ AB exists with topology at C.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.5, end=0.5)
self.assertEqual(len(topology.paths.all()), 1)
cd = PathFactory.create(geom=LineString((2, 0), (2, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
self.assertEqual(len(topology.paths.all()), 3)
self.assertEqual(len(ab.aggregations.all()), 1)
aggr_ab = ab.aggregations.all()[0]
self.assertEqual(len(cb.aggregations.all()), 1)
aggr_cb = cb.aggregations.all()[0]
self.assertEqual(len(cd.aggregations.all()), 1)
aggr_cd = cd.aggregations.all()[0]
self.assertEqual((1.0, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.0, 0.0), (aggr_cb.start_position, aggr_cb.end_position))
self.assertEqual((0.0, 0.0), (aggr_cd.start_position, aggr_cd.end_position))
def test_split_tee_2(self):
"""
C
A +--X--+----+ B
|
+ AB exists.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.25, end=0.25)
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(geom=LineString((2, 0), (2, 2)))
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(ab.aggregations.all()), 1)
aggr_ab = ab.aggregations.all()[0]
self.assertEqual((0.5, 0.5), (aggr_ab.start_position, aggr_ab.end_position))
def test_split_tee_3(self):
"""
C
A +-----+--X--+ B
|
+ AB exists.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.75, end=0.75)
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(geom=LineString((2, 0), (2, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(ab.aggregations.all()), 0)
self.assertEqual(len(cb.aggregations.all()), 1)
aggr_cb = cb.aggregations.all()[0]
self.assertEqual((0.5, 0.5), (aggr_cb.start_position, aggr_cb.end_position))
def test_split_tee_4(self):
"""
C
A X-----+----+ B
|
+ AB exists.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.0, end=0.0)
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(geom=LineString((2, 0), (2, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 0)
aggr_ab = ab.aggregations.all()[0]
self.assertEqual((0.0, 0.0), (aggr_ab.start_position, aggr_ab.end_position))
def test_split_tee_5(self):
"""
C
A +-----+----X B
|
+ AB exists.
D Add CD.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=1.0, end=1.0)
self.assertEqual(len(topology.paths.all()), 1)
PathFactory.create(name="CD", geom=LineString((2, 0), (2, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(ab.aggregations.all()), 0)
self.assertEqual(len(cb.aggregations.all()), 1)
aggr_cb = cb.aggregations.all()[0]
self.assertEqual((1.0, 1.0), (aggr_cb.start_position, aggr_cb.end_position))
def test_split_tee_6(self):
"""
X
C
A +-----+-----+ B
|
+ AB exists. Add CD.
D Point with offset is now linked to AC.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (8, 0)))
poi = Point(1, 3, srid=settings.SRID)
poi.transform(settings.API_SRID)
topology = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
aggr = topology.aggregations.all()[0]
position = topology.geom.coords
self.assertTrue(almostequal(3, topology.offset))
self.assertTrue(almostequal(0.125, aggr.start_position))
self.assertTrue(almostequal(0.125, aggr.end_position))
# Add CD
PathFactory.create(name="CD", geom=LineString((4, 0), (4, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
aggr_ab = ab.aggregations.all()[0]
topology.reload()
self.assertTrue(almostequal(3, topology.offset))
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 0)
self.assertEqual(position, topology.geom.coords)
self.assertTrue(almostequal(0.5, aggr_ab.start_position))
self.assertTrue(almostequal(0.5, aggr_ab.end_position))
def test_split_tee_7(self):
"""
X
C
A +-----+-----+ B
|
+ AB exists. Add CD.
D Point with offset is now linked to CB.
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (8, 0)))
poi = Point(7, 3, srid=settings.SRID)
poi.transform(settings.API_SRID)
topology = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
aggr = topology.aggregations.all()[0]
position = topology.geom.coords
self.assertTrue(almostequal(3, topology.offset))
self.assertTrue(almostequal(0.875, aggr.start_position))
self.assertTrue(almostequal(0.875, aggr.end_position))
# Add CD
PathFactory.create(name="CD", geom=LineString((4, 0), (4, 2)))
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
topology.reload()
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(ab.aggregations.all()), 0)
self.assertEqual(len(cb.aggregations.all()), 1)
self.assertTrue(almostequal(3, topology.offset), topology.offset)
self.assertEqual(position, topology.geom.coords)
aggr_cb = cb.aggregations.all()[0]
self.assertTrue(almostequal(0.75, aggr_cb.start_position))
self.assertTrue(almostequal(0.75, aggr_cb.end_position))
def test_split_on_update(self):
""" + D
:
:
A +-----------+ B A +-----X---+ B
:
C +---X---+ D C +----+
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, 1), (4, 1)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.5, end=0.5)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((2, -2), (2, 2))
cd.save()
ab2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(ab2.aggregations.all()), 1)
self.assertEqual(len(cd2.aggregations.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 1)
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 4)
aggr_ab = ab.aggregations.all()[0]
aggr_ab2 = ab2.aggregations.all()[0]
aggr_cd = cd.aggregations.all()[0]
aggr_cd2 = cd2.aggregations.all()[0]
self.assertEqual((1.0, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.0, 0.0), (aggr_ab2.start_position, aggr_ab2.end_position))
self.assertEqual((1.0, 1.0), (aggr_cd.start_position, aggr_cd.end_position))
self.assertEqual((0.0, 0.0), (aggr_cd2.start_position, aggr_cd2.end_position))
def test_split_on_update_2(self):
""" + D
:
:
A +-----------+ B A +-----+---+ B
:
C +-X-----+ D C +--X-+
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, 1), (4, 1)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.25, end=0.25)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((2, -2), (2, 2))
cd.save()
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 1)
self.assertEqual(len(cd2.aggregations.all()), 0)
aggr_cd = cd.aggregations.all()[0]
self.assertEqual((0.5, 0.5), (aggr_cd.start_position, aggr_cd.end_position))
def test_split_on_update_3(self):
""" + E
X
:
A +-----------+ B A +-----+---+ B
:
C +-----X-+ D C +----+ D
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, 1), (4, 1)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.75, end=0.75)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((2, -2), (2, 2))
cd.save()
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 0)
self.assertEqual(len(cd2.aggregations.all()), 1)
aggr_cd2 = cd2.aggregations.all()[0]
self.assertEqual((0.5, 0.5), (aggr_cd2.start_position, aggr_cd2.end_position))
def test_split_on_update_4(self):
""" + E
:
:
A +-----------+ B A +-----+---+ B
:
C X-------+ D C X----+ D
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, 1), (4, 1)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.0, end=0.0)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((2, -2), (2, 2))
cd.save()
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 1)
self.assertEqual(len(cd2.aggregations.all()), 0)
aggr_cd = cd.aggregations.all()[0]
self.assertEqual((0.0, 0.0), (aggr_cd.start_position, aggr_cd.end_position))
def test_split_on_update_5(self):
""" X E
:
:
A +-----------+ B A +-----+---+ B
:
C +-------X D C +----+ D
"""
PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, 1), (4, 1)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=1.0, end=1.0)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((2, -2), (2, 2))
cd.save()
cd2 = Path.objects.filter(name="CD").exclude(pk=cd.pk)[0]
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 0)
self.assertEqual(len(cd2.aggregations.all()), 1)
aggr_cd2 = cd2.aggregations.all()[0]
self.assertEqual((1.0, 1.0), (aggr_cd2.start_position, aggr_cd2.end_position))
def test_split_on_update_6(self):
"""
D
A +-----------+ B A +-----X---+ B
:
C +-------X D :
+
C
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, 1), (4, 1)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=1.0, end=1.0)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((2, -2), (2, 0))
cd.save()
db = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(db.aggregations.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 3)
aggr_ab = ab.aggregations.all()[0]
aggr_db = db.aggregations.all()[0]
aggr_cd = cd.aggregations.all()[0]
self.assertEqual((1.0, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.0, 0.0), (aggr_db.start_position, aggr_db.end_position))
self.assertEqual((1.0, 1.0), (aggr_cd.start_position, aggr_cd.end_position))
def test_split_on_update_7(self):
"""
C
A +-----------+ B A +-----X---+ B
:
C X-------+ D :
+ D
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0)))
cd = PathFactory.create(name="CD", geom=LineString((0, 1), (4, 1)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(cd, start=0.0, end=0.0)
self.assertEqual(len(topology.paths.all()), 1)
cd.geom = LineString((2, 0), (2, -2))
cd.save()
cb = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 1)
self.assertEqual(len(cd.aggregations.all()), 1)
self.assertEqual(len(topology.paths.all()), 3)
aggr_ab = ab.aggregations.all()[0]
aggr_cb = cb.aggregations.all()[0]
aggr_cd = cd.aggregations.all()[0]
self.assertEqual((1.0, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.0, 0.0), (aggr_cb.start_position, aggr_cb.end_position))
self.assertEqual((0.0, 0.0), (aggr_cd.start_position, aggr_cd.end_position))
class SplitPathGenericTopologyTest(TestCase):
def test_add_simple_path(self):
"""
A +--== ==----+ C
\\ //
\\ //
==+==
B
Add path:
D E
A +--==+--------+==----+ C
\\ //
\\ //
==+==
B
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0),
(6, -2), (8, -2)))
bc = PathFactory.create(name="BC", geom=LineString((8, -2), (10, -2),
(12, 0), (14, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.25, end=1.0)
topology.add_path(bc, start=0.0, end=0.75)
self.assertEqual(len(topology.paths.all()), 2)
originalgeom = LineString((2.2071067811865475, 0), (4, 0), (6, -2), (8, -2), (10, -2), (12, 0), (12.2928932188134521, 0))
self.assertEqual(topology.geom, originalgeom)
# Add a path
de = PathFactory.create(name="DE", geom=LineString((4, 0), (12, 0)))
self.assertEqual(len(Path.objects.all()), 5)
ab_2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
bc_2 = Path.objects.filter(name="BC").exclude(pk=bc.pk)[0]
# Topology aggregations were updated
topology.reload()
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(ab_2.aggregations.all()), 1)
self.assertEqual(len(bc.aggregations.all()), 1)
self.assertEqual(len(bc_2.aggregations.all()), 1)
self.assertEqual(len(de.aggregations.all()), 0)
aggr_ab = ab.aggregations.all()[0]
aggr_ab2 = ab_2.aggregations.all()[0]
aggr_bc = bc.aggregations.all()[0]
aggr_bc2 = bc_2.aggregations.all()[0]
self.assertEqual((0.551776695296637, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.0, 1.0), (aggr_ab2.start_position, aggr_ab2.end_position))
self.assertEqual((0.0, 1.0), (aggr_bc.start_position, aggr_bc.end_position))
self.assertEqual((0.0, 0.146446609406726), (aggr_bc2.start_position, aggr_bc2.end_position))
# But topology resulting geometry did not change
self.assertEqual(topology.geom, originalgeom)
def test_add_path_converge(self):
"""
A +--== ==----+ C
\\ //
\\ //
==+==
B
Add path:
D E
A +--==+--------+==----+ C
\\ //
\\ //
==+==
B
"""
ab = PathFactory.create(name="AB", geom=LineString((0, 0), (4, 0),
(6, -2), (8, -2)))
cb = PathFactory.create(name="CB", geom=LineString((14, 0), (12, 0),
(10, -2), (8, -2)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ab, start=0.25, end=1.0)
topology.add_path(cb, start=1.0, end=0.25)
self.assertEqual(len(topology.paths.all()), 2)
originalgeom = LineString((2.2071067811865475, 0), (4, 0), (6, -2), (8, -2), (10, -2), (12, 0), (12.2928932188134521, 0))
self.assertEqual(topology.geom, originalgeom)
# Add a path
de = PathFactory.create(name="DE", geom=LineString((4, 0), (12, 0)))
self.assertEqual(len(Path.objects.all()), 5)
ab_2 = Path.objects.filter(name="AB").exclude(pk=ab.pk)[0]
cb_2 = Path.objects.filter(name="CB").exclude(pk=cb.pk)[0]
# Topology aggregations were updated
topology.reload()
self.assertEqual(len(ab.aggregations.all()), 1)
self.assertEqual(len(ab_2.aggregations.all()), 1)
self.assertEqual(len(cb.aggregations.all()), 1)
self.assertEqual(len(cb_2.aggregations.all()), 1)
self.assertEqual(len(de.aggregations.all()), 0)
aggr_ab = ab.aggregations.all()[0]
aggr_ab2 = ab_2.aggregations.all()[0]
aggr_cb = cb.aggregations.all()[0]
aggr_cb2 = cb_2.aggregations.all()[0]
self.assertEqual((0.551776695296637, 1.0), (aggr_ab.start_position, aggr_ab.end_position))
self.assertEqual((0.0, 1.0), (aggr_ab2.start_position, aggr_ab2.end_position))
self.assertEqual((1.0, 0.0), (aggr_cb2.start_position, aggr_cb2.end_position))
self.assertEqual((1.0, 0.853553390593274), (aggr_cb.start_position, aggr_cb.end_position))
# But topology resulting geometry did not change
self.assertEqual(topology.geom, originalgeom)
def test_add_path_diverge(self):
"""
A +--== ==----+ C
\\ //
\\ //
==+==
B
Add path:
D E
A +--==+--------+==----+ C
\\ //
\\ //
==+==
B
"""
ba = PathFactory.create(name="BA", geom=LineString((8, -2), (6, -2),
(4, 0), (0, 0)))
bc = PathFactory.create(name="BC", geom=LineString((8, -2), (10, -2),
(12, 0), (14, 0)))
topology = TopologyFactory.create(no_path=True)
topology.add_path(ba, start=0.75, end=0.0, order=1)
topology.add_path(bc, start=0.0, end=0.75, order=2)
self.assertEqual(len(topology.paths.all()), 2)
originalgeom = LineString((2.2071067811865475, 0), (4, 0), (6, -2), (8, -2), (10, -2), (12, 0), (12.2928932188134521, 0))
self.assertEqual(topology.geom, originalgeom)
# Add a path
de = PathFactory.create(name="DE", geom=LineString((4, 0), (12, 0)))
self.assertEqual(len(Path.objects.all()), 5)
ba_2 = Path.objects.filter(name="BA").exclude(pk=ba.pk)[0]
bc_2 = Path.objects.filter(name="BC").exclude(pk=bc.pk)[0]
# Topology aggregations were updated
topology.reload()
self.assertEqual(len(ba.aggregations.all()), 1)
self.assertEqual(len(ba_2.aggregations.all()), 1)
self.assertEqual(len(bc.aggregations.all()), 1)
self.assertEqual(len(bc_2.aggregations.all()), 1)
self.assertEqual(len(de.aggregations.all()), 0)
aggr_ba = ba.aggregations.all()[0]
aggr_ba2 = ba_2.aggregations.all()[0]
aggr_bc = bc.aggregations.all()[0]
aggr_bc2 = bc_2.aggregations.all()[0]
self.assertEqual((0.448223304703363, 0.0), (aggr_ba2.start_position, aggr_ba2.end_position))
self.assertEqual((1.0, 0.0), (aggr_ba.start_position, aggr_ba.end_position))
self.assertEqual((0.0, 1.0), (aggr_bc.start_position, aggr_bc.end_position))
self.assertEqual((0.0, 0.146446609406726), (aggr_bc2.start_position, aggr_bc2.end_position))
# But topology resulting geometry did not change
originalgeom = LineString((2.2071067811865470, 0), *originalgeom[1:])
self.assertEqual(topology.geom, originalgeom)
|
py | b40ac0221ba58d65c9694f4914fbe52f49da304a | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
VERSION = "0.2.2"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
setup(
name='virtual-wan',
version=VERSION,
description='Manage virtual WAN, hubs, VPN gateways and VPN sites.',
license='MIT',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-cli-extensions/tree/master/src/virtual-wan',
classifiers=CLASSIFIERS,
package_data={'azext_vwan': ['azext_metadata.json']},
packages=find_packages(),
install_requires=DEPENDENCIES
)
|
py | b40ac0945c2e972968f2f6790cb52ed3f4a30262 | from .baidu_lookup import BaiduLookup
from .sogou_lookup import SogouLookup
|
py | b40ac2662442d09e172738b84c54344f4ffb07dc |
import array
import time
import struct
from threading import Thread
import requests
# import for syntactic ease
from donkeycar.parts.web_controller.web import LocalWebController
class Joystick():
'''
An interface to a physical joystick available at /dev/input
'''
def __init__(self, dev_fn='/dev/input/js0'):
self.axis_states = {}
self.button_states = {}
self.axis_map = []
self.button_map = []
self.jsdev = None
self.dev_fn = dev_fn
# These constants were borrowed from linux/input.h
self.axis_names = {
0x00 : 'x',
0x01 : 'y',
0x02 : 'z',
0x03 : 'rx',
0x04 : 'ry',
0x05 : 'rz',
0x06 : 'trottle',
0x07 : 'rudder',
0x08 : 'wheel',
0x09 : 'gas',
0x0a : 'brake',
0x10 : 'hat0x',
0x11 : 'hat0y',
0x12 : 'hat1x',
0x13 : 'hat1y',
0x14 : 'hat2x',
0x15 : 'hat2y',
0x16 : 'hat3x',
0x17 : 'hat3y',
0x18 : 'pressure',
0x19 : 'distance',
0x1a : 'tilt_x',
0x1b : 'tilt_y',
0x1c : 'tool_width',
0x20 : 'volume',
0x28 : 'misc',
}
self.button_names = {
0x120 : 'trigger',
0x121 : 'thumb',
0x122 : 'thumb2',
0x123 : 'top',
0x124 : 'top2',
0x125 : 'pinkie',
0x126 : 'base',
0x127 : 'base2',
0x128 : 'base3',
0x129 : 'base4',
0x12a : 'base5',
0x12b : 'base6',
#PS3 sixaxis specific
0x12c : "triangle",
0x12d : "circle",
0x12e : "cross",
0x12f : 'square',
0x130 : 'a',
0x131 : 'b',
0x132 : 'c',
0x133 : 'x',
0x134 : 'y',
0x135 : 'z',
0x136 : 'tl',
0x137 : 'tr',
0x138 : 'tl2',
0x139 : 'tr2',
0x13a : 'select',
0x13b : 'start',
0x13c : 'mode',
0x13d : 'thumbl',
0x13e : 'thumbr',
0x220 : 'dpad_up',
0x221 : 'dpad_down',
0x222 : 'dpad_left',
0x223 : 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0 : 'dpad_left',
0x2c1 : 'dpad_right',
0x2c2 : 'dpad_up',
0x2c3 : 'dpad_down',
}
    def init(self):
        '''
        call once to setup connection to dev/input/js0 and map buttons
        '''
        from fcntl import ioctl
# Open the joystick device.
print('Opening %s...' % self.dev_fn)
self.jsdev = open(self.dev_fn, 'rb')
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(self.jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
self.js_name = buf.tobytes().decode('utf-8')
print('Device name: %s' % self.js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(self.jsdev, 0x80016a11, buf) # JSIOCGAXES
self.num_axes = buf[0]
buf = array.array('B', [0])
ioctl(self.jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
self.num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(self.jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:self.num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self.axis_map.append(axis_name)
self.axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(self.jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:self.num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self.button_map.append(btn_name)
self.button_states[btn_name] = 0
return True
def show_map(self):
'''
list the buttons and axis found on this joystick
'''
print ('%d axes found: %s' % (self.num_axes, ', '.join(self.axis_map)))
print ('%d buttons found: %s' % (self.num_buttons, ', '.join(self.button_map)))
def poll(self):
'''
query the state of the joystick, returns button which was pressed, if any,
and axis which was moved, if any. button_state will be None, 1, or 0 if no changes,
pressed, or released. axis_val will be a float from -1 to +1. button and axis will
be the string label determined by the axis map in init.
'''
button = None
button_state = None
axis = None
axis_val = None
# Main event loop
evbuf = self.jsdev.read(8)
if evbuf:
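            # Each event is an 8-byte struct js_event from the Linux joystick API:
            # u32 time (ms), s16 value, u8 type, u8 number.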
tval, value, typev, number = struct.unpack('IhBB', evbuf)
if typev & 0x80:
#ignore initialization event
return button, button_state, axis, axis_val
if typev & 0x01:
button = self.button_map[number]
if button:
self.button_states[button] = value
button_state = value
if typev & 0x02:
axis = self.axis_map[number]
if axis:
fvalue = value / 32767.0
self.axis_states[axis] = fvalue
axis_val = fvalue
return button, button_state, axis, axis_val
class JoystickController(object):
'''
Joystick client using access to local physical input
'''
def __init__(self, poll_delay=0.0,
max_throttle=1.0,
steering_axis='x',
throttle_axis='rz',
steering_scale=1.0,
throttle_scale=-1.0,
dev_fn='/dev/input/js0',
auto_record_on_throttle=True):
self.angle = 0.0
self.throttle = 0.0
self.mode = 'user'
self.poll_delay = poll_delay
self.running = True
self.max_throttle = max_throttle
self.steering_axis = steering_axis
self.throttle_axis = throttle_axis
self.steering_scale = steering_scale
self.throttle_scale = throttle_scale
self.recording = False
self.constant_throttle = False
self.auto_record_on_throttle = auto_record_on_throttle
self.dev_fn = dev_fn
self.js = None
#We expect that the framework for parts will start a new
#thread for our update fn. We used to do that and it caused
#two threads to be polling for js events.
def on_throttle_changes(self):
'''
        turn on recording when there is non-zero throttle in user mode.
'''
if self.auto_record_on_throttle:
self.recording = (self.throttle != 0.0 and self.mode == 'user')
def init_js(self):
'''
attempt to init joystick
'''
try:
self.js = Joystick(self.dev_fn)
self.js.init()
except FileNotFoundError:
print(self.dev_fn, "not found.")
self.js = None
return self.js is not None
def update(self):
'''
poll a joystick for input events
button map name => PS3 button => function
        * top2 = PS3 dpad up => decrease throttle scale
        * base = PS3 dpad down => increase throttle scale
* base2 = PS3 dpad left => increase steering scale
* pinkie = PS3 dpad right => decrease steering scale
* trigger = PS3 select => switch modes
* top = PS3 start => toggle constant throttle
* base5 = PS3 left trigger 1
* base3 = PS3 left trigger 2
* base6 = PS3 right trigger 1
* base4 = PS3 right trigger 2
* thumb2 = PS3 right thumb
* thumb = PS3 left thumb
        * circle = PS3 circle => toggle recording
* triangle = PS3 triangle => increase max throttle
* cross = PS3 cross => decrease max throttle
'''
#wait for joystick to be online
while self.running and not self.init_js():
time.sleep(5)
count=0
while self.running:
button, button_state, axis, axis_val = self.js.poll()
if axis == self.steering_axis:
self.angle = self.steering_scale * axis_val
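                # Only announce every other steering event to limit calls to the
                # text-to-speech endpoint.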
if (count==1):
count=0
if (self.angle<0):
r=requests.get('https://8hlrhk9awh.execute-api.us-east-1.amazonaws.com/Gamma/TextToSpeechPolly?voice=Joanna&text=Turning Left')
elif (self.angle>0):
r=requests.get('https://8hlrhk9awh.execute-api.us-east-1.amazonaws.com/Gamma/TextToSpeechPolly?voice=Joanna&text=Turning Right')
else:
count=count+1
print("angle", self.angle)
if axis == self.throttle_axis:
#this value is often reversed, with positive value when pulling down
self.throttle = (self.throttle_scale * axis_val * self.max_throttle)
print("throttle", self.throttle)
self.on_throttle_changes()
if button == 'trigger' and button_state == 1:
'''
switch modes from:
user: human controlled steer and throttle
local_angle: ai steering, human throttle
local: ai steering, ai throttle
'''
if self.mode == 'user':
self.mode = 'local_angle'
elif self.mode == 'local_angle':
self.mode = 'local'
else:
self.mode = 'user'
print('new mode:', self.mode)
if button == 'circle' and button_state == 1:
'''
toggle recording on/off
'''
if self.auto_record_on_throttle:
print('auto record on throttle is enabled.')
elif self.recording:
self.recording = False
else:
self.recording = True
print('recording:', self.recording)
if button == 'triangle' and button_state == 1:
'''
increase max throttle setting
'''
self.max_throttle = round(min(1.0, self.max_throttle + 0.01), 2)
if self.constant_throttle:
self.throttle = self.max_throttle
self.on_throttle_changes()
print('max_throttle:', self.max_throttle)
if button == 'cross' and button_state == 1:
'''
decrease max throttle setting
'''
self.max_throttle = round(max(0.0, self.max_throttle - 0.01), 2)
if self.constant_throttle:
self.throttle = self.max_throttle
self.on_throttle_changes()
print('max_throttle:', self.max_throttle)
if button == 'base' and button_state == 1:
'''
increase throttle scale
'''
self.throttle_scale = round(min(0.0, self.throttle_scale + 0.05), 2)
print('throttle_scale:', self.throttle_scale)
if button == 'top2' and button_state == 1:
'''
decrease throttle scale
'''
self.throttle_scale = round(max(-1.0, self.throttle_scale - 0.05), 2)
print('throttle_scale:', self.throttle_scale)
if button == 'base2' and button_state == 1:
'''
increase steering scale
'''
r=requests.get('https://8hlrhk9awh.execute-api.us-east-1.amazonaws.com/Gamma/TextToSpeechPolly?voice=Joanna&text=Turning Left')
self.steering_scale = round(min(1.0, self.steering_scale + 0.05), 2)
print('steering_scale:', self.steering_scale)
if button == 'pinkie' and button_state == 1:
'''
decrease steering scale
'''
r=requests.get('https://8hlrhk9awh.execute-api.us-east-1.amazonaws.com/Gamma/TextToSpeechPolly?voice=Joanna&text=Turning right')
self.steering_scale = round(max(0.0, self.steering_scale - 0.05), 2)
print('steering_scale:', self.steering_scale)
if button == 'top' and button_state == 1:
'''
toggle constant throttle
'''
if self.constant_throttle:
self.constant_throttle = False
self.throttle = 0
self.on_throttle_changes()
else:
self.constant_throttle = True
self.throttle = self.max_throttle
self.on_throttle_changes()
print('constant_throttle:', self.constant_throttle)
time.sleep(self.poll_delay)
def run_threaded(self, img_arr=None):
self.img_arr = img_arr
return self.angle, self.throttle, self.mode, self.recording
def run(self, img_arr=None):
raise Exception("We expect for this part to be run with the threaded=True argument.")
return False
def shutdown(self):
self.running = False
time.sleep(0.5)
|
py | b40ac2a81400184fec9883de461f6da330970750 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2008 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
from __future__ import (absolute_import, division, print_function)
import sys
from dateutil import parser
from datetime import datetime
import pytz
from owslib.etree import etree, ParseError
from owslib.namespaces import Namespaces
try: # Python 3
from urllib.parse import urlsplit, urlencode
except ImportError: # Python 2
from urlparse import urlsplit
from urllib import urlencode
try:
from StringIO import StringIO # Python 2
BytesIO = StringIO
except ImportError:
from io import StringIO, BytesIO # Python 3
import cgi
import re
from copy import deepcopy
import warnings
import six
import requests
"""
Utility functions and classes
"""
class ServiceException(Exception):
#TODO: this should go in ows common module when refactored.
pass
# http://stackoverflow.com/questions/6256183/combine-two-dictionaries-of-dictionaries-python
dict_union = lambda d1,d2: dict((x,(dict_union(d1.get(x,{}),d2[x]) if
isinstance(d2.get(x),dict) else d2.get(x,d1.get(x)))) for x in
set(list(d1.keys())+list(d2.keys())))
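# Illustrative sketch of the recursive merge above:
#   dict_union({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
# returns {'a': {'x': 1, 'y': 2}, 'b': 3} -- nested dicts are merged recursively and, on a key
# collision with non-dict values, the value from d2 wins.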
# Infinite DateTimes for Python. Used in SWE 2.0 and other OGC specs as "INF" and "-INF"
class InfiniteDateTime(object):
def __lt__(self, other):
return False
def __gt__(self, other):
return True
def timetuple(self):
return tuple()
class NegativeInfiniteDateTime(object):
def __lt__(self, other):
return True
def __gt__(self, other):
return False
def timetuple(self):
return tuple()
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def format_string(prop_string):
"""
Formats a property string to remove spaces and go from CamelCase to pep8
from: http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
if prop_string is None:
return ''
st_r = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', prop_string)
st_r = st_r.replace(' ','')
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', st_r).lower()
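# Illustrative sketch: format_string('MaxScaleDenominator') returns 'max_scale_denominator',
# and format_string(None) returns ''.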
def xml_to_dict(root, prefix=None, depth=1, diction=None):
"""
    Recursively iterates through an xml element to convert each element in the tree to a (key, val) pair, where key is the
    element tag and val is the inner text of the element. Note that the recursion goes through the tree until the specified depth.
Parameters
===========
:root - root xml element, starting point of iteration
:prefix - a string to prepend to the resulting key (optional)
:depth - the number of depths to process in the tree (optional)
:diction - the dictionary to insert the (tag,text) pairs into (optional)
Return
=======
Dictionary of (key,value); where key is the element tag stripped of namespace and cleaned up to be pep8 and
value is the inner-text of the element. Note that duplicate elements will be replaced by the last element of the
same tag in the tree.
"""
ret = diction if diction is not None else dict()
for child in root:
val = testXMLValue(child)
# skip values that are empty or None
if val is None or val == '':
if depth > 1:
ret = xml_to_dict(child,prefix=prefix,depth=(depth-1),diction=ret)
continue
key = format_string(child.tag.split('}')[-1])
if prefix is not None:
key = prefix + key
ret[key] = val
if depth > 1:
ret = xml_to_dict(child,prefix=prefix,depth=(depth-1),diction=ret)
return ret
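# Illustrative sketch (hypothetical element): for a root parsed from
# '<Provider><ProviderName>ACME</ProviderName></Provider>', xml_to_dict(root) returns
# {'provider_name': 'ACME'} -- tags are stripped of namespaces and converted by format_string().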
class ResponseWrapper(object):
"""
Return object type from openURL.
Provides a thin shim around requests response object to maintain code compatibility.
"""
def __init__(self, response):
self._response = response
def info(self):
return self._response.headers
def read(self):
return self._response.content
def geturl(self):
return self._response.url
# @TODO: __getattribute__ for poking at response
def openURL(url_base, data=None, method='Get', cookies=None, username=None, password=None, timeout=30, headers=None):
"""
Function to open URLs.
Uses requests library but with additional checks for OGC service exceptions and url formatting.
Also handles cookies and simple user password authentication.
"""
headers = headers if headers is not None else {}
rkwargs = {}
rkwargs['timeout'] = timeout
auth = None
if username and password:
auth = (username, password)
rkwargs['auth'] = auth
# FIXUP for WFS in particular, remove xml style namespace
# @TODO does this belong here?
method = method.split("}")[-1]
if method.lower() == 'post':
try:
xml = etree.fromstring(data)
headers['Content-Type'] = 'text/xml'
except (ParseError, UnicodeEncodeError):
pass
rkwargs['data'] = data
elif method.lower() == 'get':
rkwargs['params'] = data
else:
raise ValueError("Unknown method ('%s'), expected 'get' or 'post'" % method)
if cookies is not None:
rkwargs['cookies'] = cookies
req = requests.request(method.upper(),
url_base,
headers=headers,
**rkwargs)
if req.status_code in [400, 401]:
raise ServiceException(req.text)
if req.status_code in [404]: # add more if needed
req.raise_for_status()
# check for service exceptions without the http header set
if 'Content-Type' in req.headers and req.headers['Content-Type'] in ['text/xml', 'application/xml']:
#just in case 400 headers were not set, going to have to read the xml to see if it's an exception report.
se_tree = etree.fromstring(req.content)
serviceException=se_tree.find('{http://www.opengis.net/ows}Exception')
if serviceException is None:
serviceException=se_tree.find('ServiceException')
if serviceException is not None:
raise ServiceException(str(serviceException.text).strip())
return ResponseWrapper(req)
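# Illustrative usage sketch (hypothetical endpoint): issue a GET with basic auth and read the body.
#   resp = openURL('http://example.com/wms',
#                  data={'service': 'WMS', 'request': 'GetCapabilities'},
#                  method='Get', username='user', password='secret')
#   xml_bytes = resp.read()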
#default namespace for nspath is OWS common
OWS_NAMESPACE = 'http://www.opengis.net/ows/1.1'
def nspath(path, ns=OWS_NAMESPACE):
"""
Prefix the given path with the given namespace identifier.
Parameters
----------
- path: ElementTree API Compatible path expression
- ns: the XML namespace URI.
"""
if ns is None or path is None:
return -1
components = []
for component in path.split('/'):
if component != '*':
component = '{%s}%s' % (ns, component)
components.append(component)
return '/'.join(components)
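# Illustrative sketch: nspath('ServiceIdentification/Title') returns
# '{http://www.opengis.net/ows/1.1}ServiceIdentification/{http://www.opengis.net/ows/1.1}Title'
# using the default OWS common namespace.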
def nspath_eval(xpath, namespaces):
''' Return an etree friendly xpath '''
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{%s}%s' % (namespaces[namespace], element))
return '/'.join(out)
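# Illustrative sketch: nspath_eval('ows:Title', {'ows': 'http://www.opengis.net/ows/1.1'})
# returns '{http://www.opengis.net/ows/1.1}Title'.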
def cleanup_namespaces(element):
""" Remove unused namespaces from an element """
if etree.__name__ == 'lxml.etree':
etree.cleanup_namespaces(element)
return element
else:
return etree.fromstring(etree.tostring(element))
def add_namespaces(root, ns_keys):
if isinstance(ns_keys, six.string_types):
ns_keys = [ns_keys]
namespaces = Namespaces()
ns_keys = [(x, namespaces.get_namespace(x)) for x in ns_keys]
if etree.__name__ != 'lxml.etree':
# We can just add more namespaces when not using lxml.
# We can't re-add an existing namespaces. Get a list of current
# namespaces in use
existing_namespaces = set()
for elem in root.getiterator():
if elem.tag[0] == "{":
uri, tag = elem.tag[1:].split("}")
existing_namespaces.add(namespaces.get_namespace_from_url(uri))
for key, link in ns_keys:
if link is not None and key not in existing_namespaces:
root.set("xmlns:%s" % key, link)
return root
else:
# lxml does not support setting xmlns attributes
# Update the elements nsmap with new namespaces
new_map = root.nsmap
for key, link in ns_keys:
if link is not None:
new_map[key] = link
# Recreate the root element with updated nsmap
new_root = etree.Element(root.tag, nsmap=new_map)
# Carry over attributes
for a, v in list(root.items()):
new_root.set(a, v)
# Carry over children
for child in root:
new_root.append(deepcopy(child))
return new_root
def getXMLInteger(elem, tag):
"""
Return the text within the named tag as an integer.
Raises an exception if the tag cannot be found or if its textual
value cannot be converted to an integer.
Parameters
----------
- elem: the element to search within
- tag: the name of the tag to look for
"""
e = elem.find(tag)
if e is None:
raise ValueError('Missing %s in %s' % (tag, elem))
return int(e.text.strip())
def testXMLValue(val, attrib=False):
"""
Test that the XML value exists, return val.text, else return None
Parameters
----------
- val: the value to be tested
"""
if val is not None:
if attrib:
return val.strip()
elif val.text:
return val.text.strip()
else:
return None
else:
return None
def testXMLAttribute(element, attribute):
"""
Test that the XML element and attribute exist, return attribute's value, else return None
Parameters
----------
- element: the element containing the attribute
- attribute: the attribute name
"""
if element is not None:
return element.get(attribute)
return None
def http_post(url=None, request=None, lang='en-US', timeout=10, username=None, password=None):
"""
Invoke an HTTP POST request
Parameters
----------
- url: the URL of the server
- request: the request message
- lang: the language
- timeout: timeout in seconds
"""
if url is None:
raise ValueError("URL required")
u = urlsplit(url)
headers = {
'User-Agent' : 'OWSLib (https://geopython.github.io/OWSLib)',
'Content-type' : 'text/xml',
'Accept' : 'text/xml',
'Accept-Language' : lang,
'Accept-Encoding' : 'gzip,deflate',
'Host' : u.netloc,
}
rkwargs = {}
if username is not None and password is not None:
rkwargs['auth'] = (username, password)
up = requests.post(url, request, headers=headers, **rkwargs)
return up.content
def element_to_string(element, encoding=None, xml_declaration=False):
"""
Returns a string from a XML object
Parameters
----------
- element: etree Element
- encoding (optional): encoding in string form. 'utf-8', 'ISO-8859-1', etc.
- xml_declaration (optional): whether to include xml declaration
"""
output = None
if encoding is None:
encoding = "ISO-8859-1"
if etree.__name__ == 'lxml.etree':
if xml_declaration:
if encoding in ['unicode', 'utf-8']:
output = '<?xml version="1.0" encoding="utf-8" standalone="no"?>\n%s' % \
etree.tostring(element, encoding='unicode')
else:
output = etree.tostring(element, encoding=encoding, xml_declaration=True)
else:
output = etree.tostring(element)
else:
if xml_declaration:
output = '<?xml version="1.0" encoding="%s" standalone="no"?>\n%s' % (encoding,
etree.tostring(element, encoding=encoding))
else:
output = etree.tostring(element)
return output
def xml2string(xml):
"""
Return a string of XML object
Parameters
----------
- xml: xml string
"""
warnings.warn("DEPRECIATION WARNING! You should now use the 'element_to_string' method \
The 'xml2string' method will be removed in a future version of OWSLib.")
return '<?xml version="1.0" encoding="ISO-8859-1" standalone="no"?>\n' + xml
def xmlvalid(xml, xsd):
"""
Test whether an XML document is valid
Parameters
----------
- xml: XML content
- xsd: pointer to XML Schema (local file path or URL)
"""
xsd1 = etree.parse(xsd)
xsd2 = etree.XMLSchema(xsd1)
doc = etree.parse(StringIO(xml))
return xsd2.validate(doc)
def xmltag_split(tag):
''' Return XML element bare tag name (without prefix) '''
try:
return tag.split('}')[1]
except:
return tag
def getNamespace(element):
''' Utility method to extract the namespace from an XML element tag encoded as {namespace}localname. '''
if element.tag[0]=='{':
return element.tag[1:].split("}")[0]
else:
return ""
def build_get_url(base_url, params):
''' Utility function to build a full HTTP GET URL from the service base URL and a dictionary of HTTP parameters. '''
qs = []
if base_url.find('?') != -1:
qs = cgi.parse_qsl(base_url.split('?')[1])
pars = [x[0] for x in qs]
for key,value in six.iteritems(params):
if key not in pars:
qs.append( (key,value) )
urlqs = urlencode(tuple(qs))
return base_url.split('?')[0] + '?' + urlqs
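# Illustrative sketch: build_get_url('http://host/wms?map=foo', {'service': 'WMS'}) keeps the
# existing 'map' parameter and appends the new one: 'http://host/wms?map=foo&service=WMS'.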
def dump(obj, prefix=''):
'''Utility function to print to standard output a generic object with all its attributes.'''
print("%s %s.%s : %s" % (prefix, obj.__module__, obj.__class__.__name__, obj.__dict__))
def getTypedValue(data_type, value):
'''Utility function to cast a string value to the appropriate XSD type. '''
if data_type == 'boolean':
return bool(value)
elif data_type == 'integer':
return int(value)
elif data_type == 'float':
return float(value)
elif data_type == 'string':
return str(value)
else:
return value # no type casting
def extract_time(element):
''' return a datetime object based on a gml text string
ex:
<gml:beginPosition>2006-07-27T21:10:00Z</gml:beginPosition>
<gml:endPosition indeterminatePosition="now"/>
If there happens to be a strange element with both attributes and text,
use the text.
ex: <gml:beginPosition indeterminatePosition="now">2006-07-27T21:10:00Z</gml:beginPosition>
Would be 2006-07-27T21:10:00Z, not 'now'
'''
if element is None:
return None
try:
dt = parser.parse(element.text)
except Exception:
att = testXMLValue(element.attrib.get('indeterminatePosition'), True)
if att and att == 'now':
dt = datetime.utcnow()
dt.replace(tzinfo=pytz.utc)
else:
dt = None
return dt
def extract_xml_list(elements):
"""
    Some people don't have separate tags for their keywords and separate them with
a newline. This will extract out all of the keywords correctly.
"""
if elements:
keywords = [re.split(r'[\n\r]+',f.text) for f in elements if f.text]
flattened = [item.strip() for sublist in keywords for item in sublist]
remove_blank = [_f for _f in flattened if _f]
return remove_blank
else:
return []
def bind_url(url):
"""binds an HTTP GET query string endpiont"""
if url.find('?') == -1: # like http://host/wms
binder = '?'
# if like http://host/wms?foo=bar& or http://host/wms?foo=bar
if url.find('=') != -1:
if url.find('&', -1) != -1: # like http://host/wms?foo=bar&
binder = ''
else: # like http://host/wms?foo=bar
binder = '&'
# if like http://host/wms?foo
if url.find('?') != -1:
if url.find('?', -1) != -1: # like http://host/wms?
binder = ''
elif url.find('&', -1) == -1: # like http://host/wms?foo=bar
binder = '&'
return '%s%s' % (url, binder)
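# Illustrative sketch of the binder logic above:
#   bind_url('http://host/wms')          -> 'http://host/wms?'
#   bind_url('http://host/wms?foo=bar')  -> 'http://host/wms?foo=bar&'
#   bind_url('http://host/wms?foo=bar&') -> 'http://host/wms?foo=bar&'  (already bound)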
import logging
# Null logging handler
try:
# Python 2.7
NullHandler = logging.NullHandler
except AttributeError:
# Python < 2.7
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('owslib')
log.addHandler(NullHandler())
# OrderedDict
try: # 2.7
from collections import OrderedDict
except: # 2.6
from ordereddict import OrderedDict
def which_etree():
"""decipher which etree library is being used by OWSLib"""
which_etree = None
if 'lxml' in etree.__file__:
which_etree = 'lxml.etree'
elif 'xml/etree' in etree.__file__:
which_etree = 'xml.etree'
    elif 'elementtree' in etree.__file__:
which_etree = 'elementtree.ElementTree'
return which_etree
def findall(root, xpath, attribute_name=None, attribute_value=None):
"""Find elements recursively from given root element based on
xpath and possibly given attribute
:param root: Element root element where to start search
:param xpath: xpath defintion, like {http://foo/bar/namespace}ElementName
:param attribute_name: name of possible attribute of given element
:param attribute_value: value of the attribute
:return: list of elements or None
"""
found_elements = []
# python 2.6 < does not support complicated XPATH expressions used lower
if (2, 6) == sys.version_info[0:2] and which_etree() != 'lxml.etree':
elements = root.getiterator(xpath)
if attribute_name is not None and attribute_value is not None:
for element in elements:
if element.attrib.get(attribute_name) == attribute_value:
found_elements.append(element)
else:
found_elements = elements
# python at least 2.7 and/or lxml can do things much simplier
else:
if attribute_name is not None and attribute_value is not None:
xpath = '%s[@%s="%s"]' % (xpath, attribute_name, attribute_value)
found_elements = root.findall('.//' + xpath)
if found_elements == []:
found_elements = None
return found_elements
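# Illustrative sketch: find every gml:Envelope with srsName="EPSG:4326" anywhere under root.
#   envelopes = findall(root, '{http://www.opengis.net/gml}Envelope',
#                       attribute_name='srsName', attribute_value='EPSG:4326')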
|
py | b40ac401e6e88b97702d7622f5482c42e4e7ba4e | """
sourcemap
~~~~~~~~~
:copyright: (c) 2013 by Matt Robenolt
:license: BSD, see LICENSE for more details.
"""
from .exceptions import SourceMapDecodeError # NOQA
from .decoder import SourceMapDecoder
__version__ = '0.2.1'
def load(fp, cls=None):
"Parse a sourcemap from a file-like object"
return loads(fp.read(), cls)
def loads(source, cls=None):
"Parse a sourcemap from a string"
cls = cls or SourceMapDecoder
return cls().decode(source)
def discover(source):
"Given a JavaScript file, find the sourceMappingURL line"
source = source.splitlines()
# Source maps are only going to exist at either the top or bottom of the document.
# Technically, there isn't anything indicating *where* it should exist, so we
# are generous and assume it's somewhere either in the first or last 5 lines.
# If it's somewhere else in the document, you're probably doing it wrong.
if len(source) > 10:
possibilities = source[:5] + source[-5:]
else:
possibilities = source
for line in set(possibilities):
pragma = line[:21]
if pragma == '//# sourceMappingURL=' or pragma == '//@ sourceMappingURL=':
# We want everything AFTER the pragma, which is 21 chars long
return line[21:].rstrip()
# XXX: Return None or raise an exception?
return None
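# Illustrative usage sketch (hypothetical file names):
#   import sourcemap
#   with open('app.min.js') as f:
#       map_url = sourcemap.discover(f.read())   # e.g. 'app.min.js.map' taken from the pragma line
#   with open('app.min.js.map') as f:
#       index = sourcemap.load(f)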
|
py | b40ac6d64d3d097b4effd904fbb73e28744b7936 | #!/usr/bin/env python
import logging
import os
import sys
from codecs import open
from importlib import import_module
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
if sys.version_info < (3, 0):
from StringIO import StringIO
else:
from io import StringIO
BASEDIR = os.path.abspath(os.path.dirname(__file__))
EXCLUDE_FROM_PACKAGES = ['docs', 'tests']
long_description = StringIO()
version = import_module('django_cryptography').get_version()
with open(os.path.join(BASEDIR, 'README.rst'), encoding='utf-8') as fp:
in_block = False
for line in fp.readlines():
if not in_block and line.startswith('.. START HIDDEN'):
in_block = True
elif in_block and line.startswith('.. END HIDDEN'):
in_block = False
elif not in_block:
long_description.write(line)
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty:
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class DjangoTest(TestCommand):
user_options = [
('settings=', None, "The Python path to a settings module, e.g. "
"\"myproject.settings.main\". If this isn't provided, the "
"DJANGO_SETTINGS_MODULE environment variable will be used."),
('noinput', None,
"Tells Django to NOT prompt the user for input of any kind."),
('failfast', None, "Tells Django to stop running the test suite after "
"first failed test."),
('testrunner=', None,
"Tells Django to use specified test runner class "
"instead of the one specified by the TEST_RUNNER setting."),
('liveserver=', None, "Overrides the default address where the live "
"server (used with LiveServerTestCase) is expected to run from. The "
"default value is localhost:8081."),
('top-level-directory=', 't', "Top level of project for unittest "
"discovery."),
('pattern=', 'p', "The test matching pattern. Defaults to test*.py."),
('keepdb', 'k', "Preserves the test DB between runs."),
('reverse', 'r', "Reverses test cases order."),
('debug-sql', 'd', "Prints logged SQL queries on failure."),
]
def initialize_options(self):
self.test_suite = 'DjangoTest'
self.settings = None
self.test_labels = None
self.noinput = 0
self.failfast = 0
self.testrunner = None
self.liveserver = None
self.top_level_directory = None
self.pattern = None
self.keepdb = False
self.reverse = False
self.debug_sql = False
self.output_dir = None
def finalize_options(self):
self.verbosity = self.verbose
if self.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = self.settings
if self.test_labels is not None:
self.test_labels = self.test_labels.split(',')
self.noinput = bool(self.noinput)
self.failfast = bool(self.failfast)
if self.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = self.liveserver
if self.pattern is None:
self.pattern = 'test*.py'
self.keepdb = bool(self.keepdb)
self.reverse = bool(self.reverse)
self.debug_sql = bool(self.debug_sql)
if self.output_dir is None:
self.output_dir = 'testxml'
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def run(self):
if self.verbosity > 0:
# ensure that deprecation warnings are displayed during testing
# the following state is assumed:
# logging.capturewarnings is true
# a "default" level warnings filter has been added for
# DeprecationWarning. See django.conf.LazySettings._configure_logging
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
TestCommand.run(self)
if self.verbosity > 0:
# remove the testing-specific handler
logger.removeHandler(handler)
def run_tests(self):
import django
django.setup()
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings, self.testrunner)
test_runner = TestRunner(
pattern=self.pattern,
top_level=self.top_level_directory,
verbosity=self.verbose,
interactive=(not self.noinput),
failfast=self.failfast,
keepdb=self.keepdb,
reverse=self.reverse,
debug_sql=self.debug_sql,
output_dir=self.output_dir)
failures = test_runner.run_tests(self.test_labels)
sys.exit(bool(failures))
setup(
name='django-cryptography',
version=version,
description='Easily encrypt data in Django',
long_description=long_description.getvalue(),
url='https://github.com/georgemarshall/django-cryptography',
author='George Marshall',
author_email='[email protected]',
license='BSD',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security :: Cryptography',
],
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
install_requires=[
'django-appconf',
'cryptography',
],
tests_require=['Django'],
cmdclass={
'test': DjangoTest,
},
zip_safe=False,
)
|
py | b40ac92914eec86c89e32c5bab05a6424732dca7 | # Self attention.
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.registers import MODULES
def hw_flatten(x):
# Input shape x: [BATCH, HEIGHT, WIDTH, CHANNELS]
# flat the feature volume across the width and height dimensions
x_shape = x.shape
return torch.reshape(x, [x_shape[0], -1, x_shape[-1]]) # return [BATCH, W*H, CHANNELS]
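# Illustrative sketch: a feature volume x of shape [BATCH, HEIGHT, WIDTH, CHANNELS], e.g.
# torch.zeros(2, 8, 8, 256), is reshaped by hw_flatten to [BATCH, HEIGHT*WIDTH, CHANNELS],
# i.e. [2, 64, 256], so attention scores can be computed across all spatial positions.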
class SkipParameter(nn.Module):
def __init__(self, init_value=0.01):
super(SkipParameter, self).__init__()
self.skip_value = nn.Parameter(torch.ones(1)) # requires_grad is True by default for Parameter
nn.init.constant_(self.skip_value, init_value)
def forward(self, f_a, concat):
return f_a + self.skip_value * concat
@MODULES.register_module
class SelfAttention(nn.Module):
def __init__(self, cfg, optim_spec=None):
super(SelfAttention, self).__init__()
'''Optimizer parameters used in training'''
self.optim_spec = optim_spec
self.cfg = cfg
'''Parameters'''
feat_dim = self.cfg.config['model']['self_attention']['appearance_feature_dim']
self.layer = feat_dim//4
'''Modules'''
self.gamma = nn.Parameter(torch.ones(1)) # requires_grad is True by default for Parameter
nn.init.constant_(self.gamma, 0.0)
#self.skip = SkipParameter()
self.F = torch.nn.Conv2d(feat_dim,self.layer, kernel_size=1, padding=0, stride=1, bias=False)
self.G = torch.nn.Conv2d(feat_dim,self.layer, kernel_size=1, padding=0, stride=1, bias=False)
self.H = torch.nn.Conv2d(feat_dim, feat_dim, kernel_size=1, padding=0, stride=1, bias=False)
#self.mlp = nn.Sequential(nn.Conv1d(256,128,1), \
# nn.BatchNorm1d(128), \
# nn.ReLU(), \
# nn.Conv1d(128,64,1))
def forward(self, inputs):
input, nms_feat = inputs
if nms_feat is not None:
feat = nms_feat
else:
feat = input
#print(inputs.shape)
feat = feat.unsqueeze(-1)
f = self.F(feat)
g = self.G(feat)
h = self.H(feat)
B, C, N, _ = h.shape
f = f.transpose(dim0=1,dim1=3)
g = g.transpose(dim0=1,dim1=3)
h = h.transpose(dim0=1,dim1=3)
s = torch.matmul(hw_flatten(g), hw_flatten(f).transpose(dim0=1,dim1=2)) # # [bs, N, N]
beta = F.softmax(s, dim=-1) # attention map
##### visualize
#attention_map = []
#for i in range(beta.shape[1]):
# attention_map.append("Proposal " + str(i) + ": " + str(torch.topk(beta[0][i], 10, dim=-1)))
#end_points['attention_map'] = attention_map
######
o = torch.matmul(beta, hw_flatten(h)) # [B, N, N]*[B, N, c]->[B, N, c]
o = torch.reshape(o, shape=[B, C, N]) # [B, C, N]
x = self.gamma * o + input
#x = self.skip(inputs, o)
#x = self.skip(inputs, concat)
#x = inputs + self.dropout(self.group_norm(o))
#x = input + o
#x = inputs + self.group_norm(concat)
#x = inputs + concat
#x = self.mlp(o)
return x |
py | b40ac9c73bda3b542c0630fdd1c707f63450bcd6 | # !/usr/bin/env python3
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md')) as f:
long_description = f.read()
setup_args = dict(
name='PCProphet',
version='0.0.1',
packages=find_packages(),
scripts=[
'PCprophet/collapse.py',
'PCprophet/differential.py',
'PCprophet/exceptions.py',
'PCprophet/generate_features_v2.py',
'PCprophet/go_fdr.py',
'PCprophet/io_.py',
'PCprophet/hypothesis.py',
'PCprophet/mcl.py',
        'PCprophet/init.py',
'PCprophet/map_to_database.py',
'PCprophet/mcl.py',
'PCprophet/merge.py',
'PCprophet/parse_go.py',
'PCprophet/main.py',
'PCprophet/plots.py',
'PCprophet/predict.py',
'PCprophet/stats_.py',
'PCprophet/validate_input.py',
],
# long_description=long_description,
license='MIT',
install_requires=['scipy>=1.1', 'pandas', 'sklearn', 'networkX', 'dask>=2.30'],
package_data={
'PCprophet': ['go_term_class.txt', 'go-basic.obo', 'rf_equal.clf'],
},
# metadata to display on PyPI
author='Andrea Fossati',
author_email='[email protected]',
description='Software toolset for analysis of co-fractionation data',
keywords=['proteomics', 'machine-learning', 'signal-processing'],
url='https://github.com/fossatiA/PCProphet/',
project_urls={
'Bug Tracker': 'https://github.com/fossatiA/PCProphet/',
'Documentation': 'https://github.com/fossatiA/PCProphet/',
'Source Code': 'https://github.com/fossatiA/PCProphet/',
},
platforms="Linux, Mac OS X, Windows",
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Modified BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
entry_points={
'console_scripts': [
'PCprophet=PCprophet.app:main',
]
}
)
setup(**setup_args)
|
py | b40acadc5d1b517fe732582c8758d09df15c013d | import xml.etree.ElementTree as ET
from collections import Counter, defaultdict
import sys
import json
def generate_annotated_sentences(negbio_output, pathto_bioscope, modality_type, outfile=''):
assert modality_type in ['negation', 'speculation']
concept_idxs, text = extract_chexpert_concepts_and_text(negbio_output)
with open(pathto_bioscope, 'r') as f:
annotated_sentences = json.load(f)
concept_annotations = annotate_chexpert_concepts(annotated_sentences, concept_idxs, text, modality_type=modality_type)
if outfile:
with open(outfile, 'w') as f:
json.dump(concept_annotations, f)
return concept_annotations
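# Illustrative usage sketch (hypothetical paths): annotate CheXpert concepts with negation cues.
#   annotations = generate_annotated_sentences('report.negbio.xml', 'report.bioscope.json',
#                                              modality_type='negation',
#                                              outfile='report.annotations.json')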
def extract_chexpert_concepts_and_text(negbio_infile):
# extract chexpert concept spans together with the raw text from the NegBio output
tree = ET.parse(negbio_infile)
root = tree.getroot()
children = root.getchildren()
document = children[3].getchildren()
passage = document[1].getchildren()
text = passage[1].text
annotations = passage[2:]
concept_idxs = {}
for annotation in annotations:
for element in annotation.getchildren():
if element.tag == 'location':
data = element.attrib
start = int(data['offset'])
end = start + int(data['length'])
concept_idxs[start] = text[start:end]
return concept_idxs, text
def annotate_chexpert_concepts(annotated_sentences, concept_idxs, text, modality_type):
# tagged sentences input: {sentence_id: [tagged_versions_of_sentence]}
assert modality_type in ['negation', 'speculation']
concept_ids = defaultdict(lambda: defaultdict(list))
for sentence_index, tagged_sentences in annotated_sentences.items():
# each separate tagged sentence contains a single cue!
for tagged_sentence in tagged_sentences:
# search for cue
cue_tokens, cue_idxs = [], []
token_index_surplus = 0
for token_index, (token, label) in enumerate(tagged_sentence):
if label == 'CUE':
cue_tokens.append(token)
cue_idxs = []
for i in range(len(token.split())):
cue_idx = token_index + i + token_index_surplus
cue_idxs.append(cue_idx)
token_index_surplus += (len(token.split()) - 1)
cue = ' '.join(cue_tokens)
concept_count = 1
token_index_surplus = 0
for token_index, (token, tag) in enumerate(tagged_sentence):
text_position = text.index(token)
if text_position in concept_idxs:
# collect concept tokens
concept_identifier = 'C{}'.format(concept_count)
matching_concept = concept_idxs[text_position]
split_concept = matching_concept.split()
if len(split_concept) > 1:
new_concept_idx = text_position + matching_concept.index(split_concept[1])
concept_idxs[new_concept_idx] = ' '.join(split_concept[1:])
else:
concept_count += 1
# assign modality
if tag in ['I', 'B', 'A']:
cue_data = {'cue': cue,
'cue_idxs': cue_idxs}
else:
cue_data = None
# bundle data
token_dict = {'token_idx': token_index + token_index_surplus,
'cue_data': cue_data}
concept_ids[sentence_index][concept_identifier].append(token_dict)
token_index_surplus += (len(token.split()) - 1)
# fuse data into single organized concept annotations
concept_annotations = defaultdict(lambda: defaultdict(dict))
for sent_id, concept_data in concept_ids.items():
for concept_id, data in concept_data.items():
collected_cues = {}
token_idxs = []
for token_dict in data:
token_idx = token_dict['token_idx']
token_idxs.append(token_idx)
cue_data = token_dict['cue_data']
if cue_data:
collected_cues[tuple(cue_data['cue_idxs'])] = cue_data['cue']
organized_cues = {}
for cue_index, (cue_idxs, cue) in enumerate(sorted(collected_cues.items())):
cue_id = 'cue_{}'.format(cue_index)
organized_cues[cue_id] = {'cue': cue,
'cue_idxs': cue_idxs}
if organized_cues:
modality = True
else:
modality = False
# convert collected cues to cue identifiers!
concept_annotations[sent_id][concept_id][modality_type] = modality
concept_annotations[sent_id][concept_id]['token_idxs'] = token_idxs
concept_annotations[sent_id][concept_id]['cue_data'] = organized_cues
return concept_annotations
|
py | b40acb0b2e8b4376e878a4b2f7eaadb876dd664e | from kfp.v2.components.types.artifact_types import Artifact
from kfp.v2.dsl import (
Input,
Output,
OutputPath,
component,
ClassificationMetrics,
Metrics,
)
from google.cloud.bigquery import Model
from typing import NamedTuple
@component(
packages_to_install=[
"google-cloud-bigquery[all]",
"google-cloud-pipeline-components",
]
)
def bqml_create_model_op(
project: str,
location: str,
query: str,
# TODO: CMEK
metrics: Output[Metrics],
classification_metrics: Output[ClassificationMetrics],
) -> NamedTuple("Outputs", [("gcp_resources", str), ("model", str)]):
"""Create a BQML model
https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create
"""
import collections
from google.cloud import bigquery
from google_cloud_pipeline_components.experimental.proto.gcp_resources_pb2 import (
GcpResources,
)
from google.protobuf import json_format
from typing import List, Optional
client = bigquery.Client(project=project, location=location)
# TODO: Add labels: https://cloud.google.com/bigquery/docs/adding-labels#job-label
query_job = client.query(query) # API request
_ = query_job.result() # Waits for query to finish
# Retrieve model name and model
table: bigquery.table.TableReference = query_job.ddl_target_table
model_name = f"{table.project}.{table.dataset_id}.{table.table_id}"
model = client.get_model(model_name)
def get_is_classification(model: Model) -> bool:
if model.training_runs:
last_training_run = model.training_runs[-1]
return (
last_training_run.evaluation_metrics.binary_classification_metrics
is not None
) or (
last_training_run.evaluation_metrics.multi_class_classification_metrics
is not None
)
else:
return False
# Build query
def build_query(
action_name: str,
model_name: str,
table_name: Optional[str],
query_statement: Optional[str],
thresholds: List[float],
) -> str:
parameters = [
f"MODEL `{model_name}`",
]
if table_name and query_statement:
raise ValueError(
"Only one of 'table_name' or 'query_statement' can be set, but not both."
)
elif table_name:
parameters.append(f"TABLE `{table_name}`")
elif query_statement:
parameters.append(f"({query_statement})")
if thresholds:
parameters.append(f"GENERATE_ARRAY({', '.join(thresholds)})")
return f"SELECT * FROM ML.{action_name}({', '.join(parameters)})"
def log_evaluations(
model_name: str,
table_name: Optional[str],
query_statement: Optional[str],
thresholds: List[float],
) -> GcpResources:
query = build_query(
action_name="EVALUATE",
model_name=model_name,
table_name=table_name,
query_statement=query_statement,
thresholds=thresholds,
)
client = bigquery.Client(project=project, location=location)
query_job = client.query(query) # API request
df = query_job.to_dataframe() # Waits for query to finish
# Log matrix
for name, value in df.to_dict(orient="list").items():
if not isinstance(value, collections.Sequence):
metrics.log_metric(name, value)
else:
metrics.log_metric(name, value[-1])
# Instantiate GCPResources Proto
query_job_resources = GcpResources()
query_job_resource = query_job_resources.resources.add()
# Write the job proto to output
query_job_resource.resource_type = "BigQueryJob"
query_job_resource.resource_uri = query_job.self_link
return query_job_resources
def log_confusion_matrix(
model_name: str,
table_name: Optional[str],
query_statement: Optional[str],
thresholds: List[float],
) -> GcpResources:
query = build_query(
action_name="CONFUSION_MATRIX",
model_name=model_name,
table_name=table_name,
query_statement=query_statement,
thresholds=thresholds,
)
client = bigquery.Client(project=project, location=location)
query_job = client.query(query) # API request
df = query_job.to_dataframe() # Waits for query to finish
        df = df.drop(columns="expected_label")
categories = [column for column in df.columns]
matrix = df.values.tolist()
print(matrix)
classification_metrics.log_confusion_matrix(
categories=categories, matrix=matrix
)
# Instantiate GCPResources Proto
query_job_resources = GcpResources()
query_job_resource = query_job_resources.resources.add()
# Write the job proto to output
query_job_resource.resource_type = "BigQueryJob"
query_job_resource.resource_uri = query_job.self_link
        return query_job_resources
def log_roc_curve(
model_name: str,
table_name: Optional[str],
query_statement: Optional[str],
thresholds: List[float],
) -> GcpResources:
query = build_query(
action_name="ROC_CURVE",
model_name=model_name,
table_name=table_name,
query_statement=query_statement,
thresholds=thresholds,
)
client = bigquery.Client(project=project, location=location)
query_job = client.query(query) # API request
df = query_job.to_dataframe() # Waits for query to finish
df_dict = df.to_dict(orient="list")
classification_metrics.log_roc_curve(
fpr=df_dict["false_positive_rate"],
tpr=df_dict["recall"],
threshold=df_dict["threshold"],
)
# Instantiate GCPResources Proto
query_job_resources = GcpResources()
query_job_resource = query_job_resources.resources.add()
# Write the job proto to output
query_job_resource.resource_type = "BigQueryJob"
query_job_resource.resource_uri = query_job.self_link
return query_job_resources
log_evaluations(
model_name=model_name,
table_name=None,
query_statement=None,
thresholds=[],
)
if get_is_classification(model=model):
# Log confusion matric
log_confusion_matrix(
model_name=model_name,
table_name=None,
query_statement=None,
thresholds=[],
)
# Log roc curve
log_roc_curve(
model_name=model_name,
table_name=None,
query_statement=None,
thresholds=[],
)
# Instantiate GCPResources Proto
query_job_resources = GcpResources()
query_job_resource = query_job_resources.resources.add()
# Write the job proto to output
query_job_resource.resource_type = "BigQueryJob"
query_job_resource.resource_uri = query_job.self_link
query_job_resources_serialized = json_format.MessageToJson(query_job_resources)
from collections import namedtuple
output = namedtuple("Outputs", ["gcp_resources", "model"])
return output(query_job_resources_serialized, model_name)
|
py | b40acb1bf5f81bb004c9f9467ffa1d60e2ece9c5 | import os
from collections import namedtuple
import six
import terminaltables
import yaml
from dagster import check
from .term import Term
HOST_CONFIG_FILE = 'dagster-aws-config.yaml'
class ConfigMixin(object):
@classmethod
def exists(cls, base_path):
'''Check that the configuration file exists and the key for this config type exists in the
file
'''
cfg_path = os.path.join(base_path, HOST_CONFIG_FILE)
if not os.path.exists(cfg_path):
return False
with open(cfg_path, 'rb') as f:
record = yaml.safe_load(f)
if record and cls.KEY in record:
return True
return False
def save(self, base_path):
'''Serialize configuration to a YAML file for future use
'''
if not os.path.exists(base_path):
os.makedirs(base_path)
cfg_path = os.path.join(base_path, HOST_CONFIG_FILE)
record = {}
if os.path.exists(cfg_path):
with open(cfg_path, 'rb') as f:
record = yaml.safe_load(f) or {}
record[self.KEY] = dict(self._asdict())
with open(cfg_path, 'wb') as f:
f.write(six.ensure_binary(yaml.dump(record, default_flow_style=False)))
Term.info('Saved %s configuration to %s' % (self.TITLE, cfg_path))
@classmethod
def load(cls, base_path):
'''Deserialize the configuration from the YAML config file.
'''
filepath = os.path.join(base_path, HOST_CONFIG_FILE)
if not os.path.exists(filepath):
Term.fatal('No configuration found, run `dagster-aws init` to get started.')
with open(filepath, 'rb') as f:
raw_cfg = yaml.safe_load(f)
return cls.__new__(cls, **raw_cfg.get(cls.KEY, {}))
def as_table(self):
'''Returns a tabulated string representation of this config class
'''
as_dict = self._asdict()
if 'password' in as_dict:
as_dict['password'] = '<redacted>'
table_data = [('Config Key', 'Value')] + list(as_dict.items())
return terminaltables.SingleTable(table_data, title=self.TITLE).table
def delete(self, base_path):
'''Remove the configuration for this resource from HOST_CONFIG_FILE
'''
if not self.exists(base_path):
Term.warning('No configuration for %s found, skipping deletion' % self.KEY)
return False
cfg_path = os.path.join(base_path, HOST_CONFIG_FILE)
with open(cfg_path, 'rb') as f:
record = yaml.safe_load(f) or {}
if self.KEY in record:
del record[self.KEY]
with open(cfg_path, 'wb') as f:
f.write(six.ensure_binary(yaml.dump(record, default_flow_style=False)))
Term.info('Removed configuration for %s from %s' % (self.KEY, cfg_path))
return True
return False
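# Illustrative sketch (hypothetical values): any ConfigMixin subclass persists itself under its KEY
# inside dagster-aws-config.yaml in the given base path, e.g.
#   cfg = RDSConfig(instance_name='dagster-rds', instance_type='db.t3.small', password='hunter2')
#   cfg.save('/home/user/.dagster-aws')
#   same_cfg = RDSConfig.load('/home/user/.dagster-aws')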
class RDSConfig(
namedtuple(
'_RDSConfig',
'instance_name instance_type storage_size_gb db_engine db_engine_version instance_uri '
'db_name username password',
),
ConfigMixin,
):
TITLE = 'RDS Configuration'
KEY = 'rds'
def __new__(
cls,
instance_name=None,
instance_type=None,
storage_size_gb=20,
db_engine='postgres',
db_engine_version='11.5',
instance_uri=None,
db_name='dagster',
username='dagster',
password=None,
):
return super(RDSConfig, cls).__new__(
cls,
instance_name=check.opt_str_param(instance_name, 'instance_name'),
instance_type=check.opt_str_param(instance_type, 'instance_type'),
storage_size_gb=check.opt_int_param(storage_size_gb, 'storage_size_gb'),
db_engine=check.opt_str_param(db_engine, 'db_engine'),
db_engine_version=check.opt_str_param(db_engine_version, 'db_engine_version'),
instance_uri=check.opt_str_param(instance_uri, 'instance_uri'),
db_name=check.opt_str_param(db_name, 'db_name'),
username=check.opt_str_param(username, 'username'),
password=check.opt_str_param(password, 'password'),
)
class EC2Config(
namedtuple(
'_HostConfig',
'remote_host instance_id region security_group_id key_pair_name key_file_path ami_id '
'local_path',
),
ConfigMixin,
):
'''Serialize the user's AWS host configuration to a YAML file for future use.
'''
TITLE = 'EC2 Configuration'
KEY = 'ec2'
def __new__(
cls,
remote_host=None,
instance_id=None,
region=None,
security_group_id=None,
key_pair_name=None,
key_file_path=None,
ami_id=None,
local_path=None,
):
return super(EC2Config, cls).__new__(
cls,
remote_host=check.opt_str_param(remote_host, 'remote_host'),
instance_id=check.opt_str_param(instance_id, 'instance_id'),
region=check.opt_str_param(region, 'region'),
security_group_id=check.opt_str_param(security_group_id, 'security_group_id'),
key_pair_name=check.opt_str_param(key_pair_name, 'key_pair_name'),
key_file_path=check.opt_str_param(key_file_path, 'key_file_path'),
ami_id=check.opt_str_param(ami_id, 'ami_id'),
local_path=check.opt_str_param(local_path, 'local_path'),
)
|
py | b40acb7440df76976094bef4070702d9ec721bc7 | import warnings
import numpy as np
from astropy.modeling import ParameterError
from numpy.lib.stride_tricks import as_strided
from kospeech.utils import logger
def load_audio(audio_path: str, del_silence: bool = False):
"""
    Load an audio file (PCM) into a signal array. If del_silence is True, eliminate all sounds below 30dB.
    If an exception occurs in numpy.memmap(), return None.
"""
try:
signal = np.memmap(audio_path, dtype='h', mode='r').astype('float32')
if del_silence:
non_silence_indices = split(signal, top_db=30)
signal = np.concatenate([signal[start:end] for start, end in non_silence_indices])
return signal / 32767 # normalize audio
except ValueError:
logger.debug('ValueError in {0}'.format(audio_path))
return None
except RuntimeError:
logger.debug('RuntimeError in {0}'.format(audio_path))
return None
except IOError:
logger.debug('IOError in {0}'.format(audio_path))
return None
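# Illustrative usage sketch (hypothetical path): load a 16-bit PCM file and drop silence below 30dB.
#   signal = load_audio('./data/sample.pcm', del_silence=True)
#   if signal is not None:
#       print(signal.shape)  # samples normalized to roughly [-1.0, 1.0]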
def __power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0):
"""
codes from https://github.com/librosa/librosa
use this code fragments instead of importing librosa package,
because of our server has a problem with importing librosa.
"""
S = np.asarray(S)
if amin <= 0:
raise ParameterError('amin must be strictly positive')
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn('power_to_db was called on complex input so phase '
'information will be discarded. To suppress this warning, '
'call power_to_db(np.abs(D)**2) instead.')
magnitude = np.abs(S)
else:
magnitude = S
if callable(ref):
# User supplied a function to calculate reference power
ref_value = ref(magnitude)
else:
ref_value = np.abs(ref)
log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))
log_spec -= 10.0 * np.log10(np.maximum(amin, ref_value))
if top_db is not None:
if top_db < 0:
raise ParameterError('top_db must be non-negative')
log_spec = np.maximum(log_spec, log_spec.max() - top_db)
return log_spec
def __to_mono(y):
"""
codes from https://github.com/librosa/librosa
use this code fragments instead of importing librosa package,
because of our server has a problem with importing librosa.
"""
def valid_audio(y, mono=True):
if not isinstance(y, np.ndarray):
raise ParameterError('Audio data must be of type numpy.ndarray')
if not np.issubdtype(y.dtype, np.floating):
raise ParameterError('Audio data must be floating-point')
elif mono and y.ndim != 1:
raise ParameterError('Invalid shape for monophonic audio: '
'ndim={:d}, shape={}'.format(y.ndim, y.shape))
if y.ndim > 2 or y.ndim == 0:
raise ParameterError('Audio data must have shape (samples,) or (channels, samples). '
'Received shape={}'.format(y.shape))
if not np.isfinite(y).all():
raise ParameterError('Audio buffer is not finite everywhere')
if not y.flags["F_CONTIGUOUS"]:
raise ParameterError('Audio buffer is not Fortran-contiguous. '
'Use numpy.asfortranarray to ensure Fortran contiguity.')
return True
# Ensure Fortran contiguity.
y = np.asfortranarray(y)
# Validate the buffer. Stereo is ok here.
valid_audio(y, mono=False)
if y.ndim > 1:
y = np.mean(y, axis=0)
return y
def __frame(x, frame_length=2048, hop_length=512, axis=-1):
"""
codes from https://github.com/librosa/librosa
use this code fragments instead of importing librosa package,
because of our server has a problem with importing librosa.
"""
if not isinstance(x, np.ndarray):
raise ParameterError('Input must be of type numpy.ndarray, '
'given type(x)={}'.format(type(x)))
if x.shape[axis] < frame_length:
raise ParameterError('Input is too short (n={:d})'
' for frame_length={:d}'.format(x.shape[axis], frame_length))
if hop_length < 1:
raise ParameterError('Invalid hop_length: {:d}'.format(hop_length))
n_frames = 1 + (x.shape[axis] - frame_length) // hop_length
strides = np.asarray(x.strides)
new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize
if axis == -1:
if not x.flags['F_CONTIGUOUS']:
raise ParameterError('Input array must be F-contiguous '
'for framing along axis={}'.format(axis))
shape = list(x.shape)[:-1] + [frame_length, n_frames]
strides = list(strides) + [hop_length * new_stride]
elif axis == 0:
if not x.flags['C_CONTIGUOUS']:
raise ParameterError('Input array must be C-contiguous '
'for framing along axis={}'.format(axis))
shape = [n_frames, frame_length] + list(x.shape)[1:]
strides = [hop_length * new_stride] + list(strides)
else:
raise ParameterError('Frame axis={} must be either 0 or -1'.format(axis))
return as_strided(x, shape=shape, strides=strides)
def __rms(y=None, S=None, frame_length=2048, hop_length=512,
center=True, pad_mode='reflect'):
"""
codes from https://github.com/librosa/librosa
use this code fragments instead of importing librosa package,
because of our server has a problem with importing librosa.
"""
if y is not None:
y = __to_mono(y)
if center:
y = np.pad(y, int(frame_length // 2), mode=pad_mode)
x = __frame(y,
frame_length=frame_length,
hop_length=hop_length)
# Calculate power
power = np.mean(np.abs(x) ** 2, axis=0, keepdims=True)
elif S is not None:
# Check the frame length
if S.shape[0] != frame_length // 2 + 1:
raise ParameterError(
'Since S.shape[0] is {}, '
'frame_length is expected to be {} or {}; '
'found {}'.format(
S.shape[0],
S.shape[0] * 2 - 2, S.shape[0] * 2 - 1,
frame_length))
# power spectrogram
x = np.abs(S) ** 2
# Adjust the DC and sr/2 component
x[0] *= 0.5
if frame_length % 2 == 0:
x[-1] *= 0.5
# Calculate power
power = 2 * np.sum(x, axis=0, keepdims=True) / frame_length ** 2
else:
raise ParameterError('Either `y` or `S` must be input.')
return np.sqrt(power)
def _signal_to_frame_nonsilent(y, frame_length=2048, hop_length=512, top_db=60,
ref=np.max):
"""
codes from https://github.com/librosa/librosa
use this code fragments instead of importing librosa package,
because of our server has a problem with importing librosa.
"""
# Convert to mono
y_mono = __to_mono(y)
# Compute the MSE for the signal
mse = __rms(y=y_mono,
frame_length=frame_length,
hop_length=hop_length) ** 2
return __power_to_db(mse.squeeze(), ref=ref, top_db=None) > - top_db
def _frames_to_samples(frames, hop_length=512, n_fft=None):
"""
codes from https://github.com/librosa/librosa
use this code fragments instead of importing librosa package,
because of our server has a problem with importing librosa.
"""
offset = 0
if n_fft is not None:
offset = int(n_fft // 2)
return (np.asanyarray(frames) * hop_length + offset).astype(int)
def split(y, top_db=60, ref=np.max, frame_length=2048, hop_length=512):
"""
codes from https://github.com/librosa/librosa
use this code fragments instead of importing librosa package,
because of our server has a problem with importing librosa.
"""
non_silent = _signal_to_frame_nonsilent(y,
frame_length=frame_length,
hop_length=hop_length,
ref=ref,
top_db=top_db)
# Interval slicing, adapted from
# https://stackoverflow.com/questions/2619413/efficiently-finding-the-interval-with-non-zeros-in-scipy-numpy-in-python
# Find points where the sign flips
edges = np.flatnonzero(np.diff(non_silent.astype(int)))
# Pad back the sample lost in the diff
edges = [edges + 1]
# If the first frame had high energy, count it
if non_silent[0]:
edges.insert(0, [0])
# Likewise for the last frame
if non_silent[-1]:
edges.append([len(non_silent)])
# Convert from frames to samples
edges = _frames_to_samples(np.concatenate(edges),
hop_length=hop_length)
# Clip to the signal duration
edges = np.minimum(edges, y.shape[-1])
# Stack the results back as an ndarray
return edges.reshape((-1, 2))
|
py | b40acba2c3811651ca796395d4ecae21e5061673 | #!/usr/bin/python3
import os
import tempfile
from pathlib import Path
from build_utils import download_and_unpack_archive, run_and_check
def install() -> None:
print(f"Installing dependencies...")
dependencies = [
"build-essential",
"bison",
"flex",
"libgmp3-dev",
"libmpc-dev",
"libmpfr-dev",
"texinfo",
"xorriso",
]
# run_and_check(["sudo", "apt", "install", *dependencies])
axle_dir = Path(__file__).parent
arch_target = "i686-elf"
toolchain_dir = axle_dir / "i686-toolchain"
with tempfile.TemporaryDirectory() as build_dir_raw:
build_dir = Path(build_dir_raw)
if False:
binutils_url = "https://ftp.gnu.org/gnu/binutils/binutils-2.37.tar.gz"
binutils_dir = download_and_unpack_archive(build_dir, binutils_url)
binutils_build_dir = build_dir / "build-binutils"
binutils_build_dir.mkdir(exist_ok=True)
configure_script_path = binutils_dir / "configure"
run_and_check(
[
configure_script_path.as_posix(),
f"--target={arch_target}",
f"--prefix={toolchain_dir.as_posix()}",
"--with-sysroot",
"--disable-nls",
"--disable-werror",
],
cwd=binutils_build_dir,
)
run_and_check(["make"], cwd=binutils_build_dir)
run_and_check(["make", "install"], cwd=binutils_build_dir)
if True:
gcc_url = "https://ftp.gnu.org/gnu/gcc/gcc-11.2.0/gcc-11.2.0.tar.gz"
gcc_dir = download_and_unpack_archive(build_dir, gcc_url)
gcc_build_dir = build_dir / "build-gcc"
gcc_build_dir.mkdir(exist_ok=True)
configure_script_path = gcc_dir / "configure"
run_and_check(
[
configure_script_path.as_posix(),
f"--target={arch_target}",
f"--prefix={toolchain_dir.as_posix()}",
"--disable-nls",
"--enable-languages=c",
"--without-headers",
],
cwd=gcc_build_dir,
)
run_and_check(["make", "all-gcc"], cwd=gcc_build_dir)
run_and_check(["make", "all-target-libgcc"], cwd=gcc_build_dir)
run_and_check(["make", "install-gcc"], cwd=gcc_build_dir)
run_and_check(["make", "install-target-libgcc"], cwd=gcc_build_dir)
# os.symlink("/usr/bin/grub-mkrescue", (toolchain_dir / "bin" / "grub-mkrescue").as_posix())
os.symlink("/usr/lib/grub/i386-pc", (toolchain_dir / "lib" / "grub" / "i386-pc").as_posix())
if __name__ == "__main__":
install()
|
py | b40acbb346f17c74d41f6603aada67e148e16f1e | from django.contrib import admin
from .models import UrlItem, UrlViews
@admin.register(UrlItem)
class UrlItemAdmin(admin.ModelAdmin):
list_display = ('owner', 'entered_url', 'short_code', 'creation_date')
@admin.register(UrlViews)
class UrlViewsAdmin(admin.ModelAdmin):
list_display = ('item', 'ip', 'view_date')
|