filename stringlengths 13-19 | text stringlengths 134-1.04M
---|---|
the-stack_0_26710
|
import xarray as xr
from xcube.core.compute import compute_dataset
from impl.algorithm import compute_chunk
def process_dataset(dataset: xr.Dataset,
output_var_name: str,
input_var_name_1: str,
input_var_name_2: str,
factor_1: float = 1.0,
factor_2: float = 2.0) -> xr.Dataset:
return compute_dataset(compute_chunk,
dataset,
input_var_names=[input_var_name_1, input_var_name_2],
input_params=dict(factor_1=factor_1, factor_2=factor_2),
output_var_name=output_var_name)
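# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal example of calling process_dataset; the variable names, factors,
# output name, and the chunking step are the editor's assumptions, not taken
# from the original source.
def _demo_process_dataset():
    import numpy as np
    demo = xr.Dataset(
        {
            "a": (("y", "x"), np.random.rand(4, 4)),
            "b": (("y", "x"), np.random.rand(4, 4)),
        }
    ).chunk({"y": 2, "x": 2})
    # Derive a new variable "c" from "a" and "b" via compute_chunk.
    return process_dataset(demo, "c", "a", "b", factor_1=0.5, factor_2=1.5)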
|
the-stack_0_26712
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Storage API."""
import base64
import binascii
import collections
import datetime
import functools
import json
import warnings
import google.api_core.client_options
from google.auth.credentials import AnonymousCredentials
from google.api_core import page_iterator
from google.cloud._helpers import _LocalStack, _NOW
from google.cloud.client import ClientWithProject
from google.cloud.exceptions import NotFound
from google.cloud.storage._helpers import _get_storage_host
from google.cloud.storage._http import Connection
from google.cloud.storage._signing import (
get_expiration_seconds_v4,
get_v4_now_dtstamps,
ensure_signed_credentials,
_sign_message,
)
from google.cloud.storage.batch import Batch
from google.cloud.storage.bucket import Bucket
from google.cloud.storage.blob import Blob
from google.cloud.storage.hmac_key import HMACKeyMetadata
from google.cloud.storage.acl import BucketACL
from google.cloud.storage.acl import DefaultObjectACL
from google.cloud.storage.constants import _DEFAULT_TIMEOUT
_marker = object()
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
:type project: str or None
:param project: the project which the client acts on behalf of. Will be
passed when creating a bucket. If not passed,
falls back to the default inferred from the environment.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``_http`` object is
passed), falls back to the default inferred from the
environment.
:type _http: :class:`~requests.Session`
:param _http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an
``_http`` object is created that is bound to the
``credentials`` for the current object.
This parameter should be considered private, and could
change in the future.
:type client_info: :class:`~google.api_core.client_info.ClientInfo`
:param client_info:
The client info used to send a user-agent string along with API
requests. If ``None``, then default info will be used. Generally,
you only need to set this if you're developing your own library
or partner tool.
:type client_options: :class:`~google.api_core.client_options.ClientOptions` or :class:`dict`
:param client_options: (Optional) Client options used to set user options on the client.
API Endpoint should be set through client_options.
"""
SCOPE = (
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write",
)
"""The scopes required for authenticating as a Cloud Storage consumer."""
def __init__(
self,
project=_marker,
credentials=None,
_http=None,
client_info=None,
client_options=None,
):
self._base_connection = None
if project is None:
no_project = True
project = "<none>"
else:
no_project = False
if project is _marker:
project = None
super(Client, self).__init__(
project=project, credentials=credentials, _http=_http
)
kw_args = {"client_info": client_info}
kw_args["api_endpoint"] = _get_storage_host()
if client_options:
if isinstance(client_options, dict):
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
kw_args["api_endpoint"] = api_endpoint
if no_project:
self.project = None
self._connection = Connection(self, **kw_args)
self._batch_stack = _LocalStack()
@classmethod
def create_anonymous_client(cls):
"""Factory: return client with anonymous credentials.
.. note::
Such a client has only limited access to "public" buckets:
listing their contents and downloading their blobs.
:rtype: :class:`google.cloud.storage.client.Client`
:returns: Instance w/ anonymous credentials and no project.
"""
client = cls(project="<none>", credentials=AnonymousCredentials())
client.project = None
return client
@property
def _connection(self):
"""Get connection or batch on the client.
:rtype: :class:`google.cloud.storage._http.Connection`
:returns: The connection set on the client, or the batch
if one is set.
"""
if self.current_batch is not None:
return self.current_batch
else:
return self._base_connection
@_connection.setter
def _connection(self, value):
"""Set connection on the client.
Intended to be used by the constructor, since the base class calls
``self._connection = connection``.
Will raise if the connection is set more than once.
:type value: :class:`google.cloud.storage._http.Connection`
:param value: The connection set on the client.
:raises: :class:`ValueError` if connection has already been set.
"""
if self._base_connection is not None:
raise ValueError("Connection already set on client")
self._base_connection = value
def _push_batch(self, batch):
"""Push a batch onto our stack.
"Protected", intended for use by batch context mgrs.
:type batch: :class:`google.cloud.storage.batch.Batch`
:param batch: newly-active batch
"""
self._batch_stack.push(batch)
def _pop_batch(self):
"""Pop a batch from our stack.
"Protected", intended for use by batch context mgrs.
:raises: IndexError if the stack is empty.
:rtype: :class:`google.cloud.storage.batch.Batch`
:returns: the top-most batch/transaction, after removing it.
"""
return self._batch_stack.pop()
def _bucket_arg_to_bucket(self, bucket_or_name):
"""Helper to return given bucket or create new by name.
Args:
bucket_or_name (Union[ \
:class:`~google.cloud.storage.bucket.Bucket`, \
str, \
]):
The bucket resource to pass or name to create.
Returns:
google.cloud.storage.bucket.Bucket
The newly created bucket or the given one.
"""
if isinstance(bucket_or_name, Bucket):
bucket = bucket_or_name
else:
bucket = Bucket(self, name=bucket_or_name)
return bucket
@property
def current_batch(self):
"""Currently-active batch.
:rtype: :class:`google.cloud.storage.batch.Batch` or ``NoneType`` (if
no batch is active).
:returns: The batch at the top of the batch stack.
"""
return self._batch_stack.top
def get_service_account_email(self, project=None, timeout=_DEFAULT_TIMEOUT):
"""Get the email address of the project's GCS service account
:type project: str
:param project:
(Optional) Project ID to use for retrieving the GCS service account
email address. Defaults to the client's project.
:type timeout: float or tuple
:param timeout: (Optional) The amount of time, in seconds, to wait
for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
:rtype: str
:returns: service account email address
"""
if project is None:
project = self.project
path = "/projects/%s/serviceAccount" % (project,)
api_response = self._base_connection.api_request(
method="GET", path=path, timeout=timeout
)
return api_response["email_address"]
def bucket(self, bucket_name, user_project=None):
"""Factory constructor for bucket object.
.. note::
This will not make an HTTP request; it simply instantiates
a bucket object owned by this client.
:type bucket_name: str
:param bucket_name: The name of the bucket to be instantiated.
:type user_project: str
:param user_project: (Optional) The project ID to be billed for API
requests made via the bucket.
:rtype: :class:`google.cloud.storage.bucket.Bucket`
:returns: The bucket object created.
"""
return Bucket(client=self, name=bucket_name, user_project=user_project)
def batch(self):
"""Factory constructor for batch object.
.. note::
This will not make an HTTP request; it simply instantiates
a batch object owned by this client.
:rtype: :class:`google.cloud.storage.batch.Batch`
:returns: The batch object created.
"""
return Batch(client=self)
def get_bucket(self, bucket_or_name, timeout=_DEFAULT_TIMEOUT):
"""API call: retrieve a bucket via a GET request.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/get
Args:
bucket_or_name (Union[ \
:class:`~google.cloud.storage.bucket.Bucket`, \
str, \
]):
The bucket resource to use or the name of the bucket to retrieve.
timeout (Optional[Union[float, Tuple[float, float]]]):
The amount of time, in seconds, to wait for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
google.cloud.storage.bucket.Bucket
The bucket matching the name provided.
Raises:
google.cloud.exceptions.NotFound
If the bucket is not found.
Examples:
Retrieve a bucket using a string.
.. literalinclude:: snippets.py
:start-after: [START get_bucket]
:end-before: [END get_bucket]
Get a bucket using a resource.
>>> from google.cloud import storage
>>> client = storage.Client()
>>> # Set properties on a plain resource object.
>>> bucket = client.get_bucket("my-bucket-name")
>>> # Time passes. Another program may have modified the bucket
... # in the meantime, so you want to get the latest state.
>>> bucket = client.get_bucket(bucket) # API request.
"""
bucket = self._bucket_arg_to_bucket(bucket_or_name)
bucket.reload(client=self, timeout=timeout)
return bucket
def lookup_bucket(self, bucket_name, timeout=_DEFAULT_TIMEOUT):
"""Get a bucket by name, returning None if not found.
You can use this if you would rather check for a None value
than catching an exception:
.. literalinclude:: snippets.py
:start-after: [START lookup_bucket]
:end-before: [END lookup_bucket]
:type bucket_name: str
:param bucket_name: The name of the bucket to get.
:type timeout: float or tuple
:param timeout: (Optional) The amount of time, in seconds, to wait
for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
:rtype: :class:`google.cloud.storage.bucket.Bucket`
:returns: The bucket matching the name provided or None if not found.
"""
try:
return self.get_bucket(bucket_name, timeout=timeout)
except NotFound:
return None
def create_bucket(
self,
bucket_or_name,
requester_pays=None,
project=None,
user_project=None,
location=None,
predefined_acl=None,
predefined_default_object_acl=None,
timeout=_DEFAULT_TIMEOUT,
):
"""API call: create a new bucket via a POST request.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
Args:
bucket_or_name (Union[ \
:class:`~google.cloud.storage.bucket.Bucket`, \
str, \
]):
The bucket resource to pass or name to create.
requester_pays (bool):
DEPRECATED. Use Bucket().requester_pays instead.
(Optional) Whether requester pays for API requests for
this bucket and its blobs.
project (str):
(Optional) The project under which the bucket is to be created.
If not passed, uses the project set on the client.
user_project (str):
(Optional) The project ID to be billed for API requests
made via created bucket.
location (str):
(Optional) The location of the bucket. If not passed,
the default location, US, will be used. See
https://cloud.google.com/storage/docs/bucket-locations
predefined_acl (str):
(Optional) Name of predefined ACL to apply to bucket. See:
https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
predefined_default_object_acl (str):
(Optional) Name of predefined ACL to apply to bucket's objects. See:
https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
timeout (Optional[Union[float, Tuple[float, float]]]):
The amount of time, in seconds, to wait for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
google.cloud.storage.bucket.Bucket
The newly created bucket.
Raises:
google.cloud.exceptions.Conflict
If the bucket already exists.
Examples:
Create a bucket using a string.
.. literalinclude:: snippets.py
:start-after: [START create_bucket]
:end-before: [END create_bucket]
Create a bucket using a resource.
>>> from google.cloud import storage
>>> client = storage.Client()
>>> # Set properties on a plain resource object.
>>> bucket = storage.Bucket("my-bucket-name")
>>> bucket.location = "europe-west6"
>>> bucket.storage_class = "COLDLINE"
>>> # Pass that resource object to the client.
>>> bucket = client.create_bucket(bucket) # API request.
"""
bucket = self._bucket_arg_to_bucket(bucket_or_name)
if project is None:
project = self.project
if project is None:
raise ValueError("Client project not set: pass an explicit project.")
if requester_pays is not None:
warnings.warn(
"requester_pays arg is deprecated. Use Bucket().requester_pays instead.",
PendingDeprecationWarning,
stacklevel=1,
)
bucket.requester_pays = requester_pays
query_params = {"project": project}
if predefined_acl is not None:
predefined_acl = BucketACL.validate_predefined(predefined_acl)
query_params["predefinedAcl"] = predefined_acl
if predefined_default_object_acl is not None:
predefined_default_object_acl = DefaultObjectACL.validate_predefined(
predefined_default_object_acl
)
query_params["predefinedDefaultObjectAcl"] = predefined_default_object_acl
if user_project is not None:
query_params["userProject"] = user_project
properties = {key: bucket._properties[key] for key in bucket._changes}
properties["name"] = bucket.name
if location is not None:
properties["location"] = location
api_response = self._connection.api_request(
method="POST",
path="/b",
query_params=query_params,
data=properties,
_target_object=bucket,
timeout=timeout,
)
bucket._set_properties(api_response)
return bucket
def download_blob_to_file(self, blob_or_uri, file_obj, start=None, end=None):
"""Download the contents of a blob object or blob URI into a file-like object.
Args:
blob_or_uri (Union[ \
:class:`~google.cloud.storage.blob.Blob`, \
str, \
]):
The blob resource to pass or URI to download.
file_obj (file):
A file handle to which to write the blob's data.
start (int):
(Optional) The first byte in a range to be downloaded.
end (int):
(Optional) The last byte in a range to be downloaded.
Examples:
Download a blob using a blob resource.
>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket('my-bucket-name')
>>> blob = storage.Blob('path/to/blob', bucket)
>>> with open('file-to-download-to', 'wb') as file_obj:
>>> client.download_blob_to_file(blob, file_obj) # API request.
Download a blob using a URI.
>>> from google.cloud import storage
>>> client = storage.Client()
>>> with open('file-to-download-to', 'wb') as file_obj:
>>> client.download_blob_to_file(
>>> 'gs://bucket_name/path/to/blob', file_obj)
"""
try:
blob_or_uri.download_to_file(file_obj, client=self, start=start, end=end)
except AttributeError:
blob = Blob.from_string(blob_or_uri)
blob.download_to_file(file_obj, client=self, start=start, end=end)
def list_blobs(
self,
bucket_or_name,
max_results=None,
page_token=None,
prefix=None,
delimiter=None,
versions=None,
projection="noAcl",
fields=None,
timeout=_DEFAULT_TIMEOUT,
):
"""Return an iterator used to find blobs in the bucket.
If :attr:`user_project` is set, bills the API request to that project.
Args:
bucket_or_name (Union[ \
:class:`~google.cloud.storage.bucket.Bucket`, \
str, \
]):
The bucket resource or bucket name whose blobs are to be listed.
max_results (int):
(Optional) The maximum number of blobs to return.
page_token (str):
(Optional) If present, return the next batch of blobs, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
prefix (str):
(Optional) Prefix used to filter blobs.
delimiter (str):
(Optional) Delimiter, used with ``prefix`` to
emulate hierarchy.
versions (bool):
(Optional) Whether object versions should be returned
as separate blobs.
projection (str):
(Optional) If used, must be 'full' or 'noAcl'.
Defaults to ``'noAcl'``. Specifies the set of
properties to return.
fields (str):
(Optional) Selector specifying which fields to include
in a partial response. Must be a list of fields. For
example to get a partial response with just the next
page token and the name and language of each blob returned:
``'items(name,contentLanguage),nextPageToken'``.
See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields
timeout (Optional[Union[float, Tuple[float, float]]]):
The amount of time, in seconds, to wait for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
Iterator of all :class:`~google.cloud.storage.blob.Blob`
in this bucket matching the arguments.
"""
bucket = self._bucket_arg_to_bucket(bucket_or_name)
return bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
projection=projection,
fields=fields,
client=self,
timeout=timeout,
)
def list_buckets(
self,
max_results=None,
page_token=None,
prefix=None,
projection="noAcl",
fields=None,
project=None,
timeout=_DEFAULT_TIMEOUT,
):
"""Get all buckets in the project associated to the client.
This will not populate the list of blobs available in each
bucket.
.. literalinclude:: snippets.py
:start-after: [START list_buckets]
:end-before: [END list_buckets]
This implements "storage.buckets.list".
:type max_results: int
:param max_results: (Optional) The maximum number of buckets to return.
:type page_token: str
:param page_token:
(Optional) If present, return the next batch of buckets, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
:type prefix: str
:param prefix: (Optional) Filter results to buckets whose names begin
with this prefix.
:type projection: str
:param projection:
(Optional) Specifies the set of properties to return. If used, must
be 'full' or 'noAcl'. Defaults to 'noAcl'.
:type fields: str
:param fields:
(Optional) Selector specifying which fields to include in a partial
response. Must be a list of fields. For example to get a partial
response with just the next page token and the id of each
bucket returned: 'items/id,nextPageToken'
:type project: str
:param project: (Optional) The project whose buckets are to be listed.
If not passed, uses the project set on the client.
:type timeout: float or tuple
:param timeout: (Optional) The amount of time, in seconds, to wait
for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:raises ValueError: if both ``project`` is ``None`` and the client's
project is also ``None``.
:returns: Iterator of all :class:`~google.cloud.storage.bucket.Bucket`
belonging to this project.
"""
if project is None:
project = self.project
if project is None:
raise ValueError("Client project not set: pass an explicit project.")
extra_params = {"project": project}
if prefix is not None:
extra_params["prefix"] = prefix
extra_params["projection"] = projection
if fields is not None:
extra_params["fields"] = fields
api_request = functools.partial(self._connection.api_request, timeout=timeout)
return page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path="/b",
item_to_value=_item_to_bucket,
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def create_hmac_key(
self,
service_account_email,
project_id=None,
user_project=None,
timeout=_DEFAULT_TIMEOUT,
):
"""Create an HMAC key for a service account.
:type service_account_email: str
:param service_account_email: e-mail address of the service account
:type project_id: str
:param project_id: (Optional) Explicit project ID for the key.
Defaults to the client's project.
:type user_project: str
:param user_project: (Optional) This parameter is currently ignored.
:type timeout: float or tuple
:param timeout: (Optional) The amount of time, in seconds, to wait
for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
:rtype:
Tuple[:class:`~google.cloud.storage.hmac_key.HMACKeyMetadata`, str]
:returns: metadata for the created key, plus the bytes of the key's secret, which is a 40-character base64-encoded string.
"""
if project_id is None:
project_id = self.project
path = "/projects/{}/hmacKeys".format(project_id)
qs_params = {"serviceAccountEmail": service_account_email}
if user_project is not None:
qs_params["userProject"] = user_project
api_response = self._connection.api_request(
method="POST", path=path, query_params=qs_params, timeout=timeout
)
metadata = HMACKeyMetadata(self)
metadata._properties = api_response["metadata"]
secret = api_response["secret"]
return metadata, secret
def list_hmac_keys(
self,
max_results=None,
service_account_email=None,
show_deleted_keys=None,
project_id=None,
user_project=None,
timeout=_DEFAULT_TIMEOUT,
):
"""List HMAC keys for a project.
:type max_results: int
:param max_results:
(Optional) Max number of keys to return in a given page.
:type service_account_email: str
:param service_account_email:
(Optional) Limit keys to those created by the given service account.
:type show_deleted_keys: bool
:param show_deleted_keys:
(Optional) Include deleted keys in the list. Default is to
exclude them.
:type project_id: str
:param project_id: (Optional) Explicit project ID for the key.
Defaults to the client's project.
:type user_project: str
:param user_project: (Optional) This parameter is currently ignored.
:type timeout: float or tuple
:param timeout: (Optional) The amount of time, in seconds, to wait
for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.storage.hmac_key.HMACKeyMetadata` instances
for the project's HMAC keys.
"""
if project_id is None:
project_id = self.project
path = "/projects/{}/hmacKeys".format(project_id)
extra_params = {}
if service_account_email is not None:
extra_params["serviceAccountEmail"] = service_account_email
if show_deleted_keys is not None:
extra_params["showDeletedKeys"] = show_deleted_keys
if user_project is not None:
extra_params["userProject"] = user_project
api_request = functools.partial(self._connection.api_request, timeout=timeout)
return page_iterator.HTTPIterator(
client=self,
api_request=api_request,
path=path,
item_to_value=_item_to_hmac_key_metadata,
max_results=max_results,
extra_params=extra_params,
)
def get_hmac_key_metadata(
self, access_id, project_id=None, user_project=None, timeout=_DEFAULT_TIMEOUT
):
"""Return a metadata instance for the given HMAC key.
:type access_id: str
:param access_id: Unique ID of an existing key.
:type project_id: str
:param project_id: (Optional) Project ID of an existing key.
Defaults to client's project.
:type timeout: float or tuple
:param timeout: (Optional) The amount of time, in seconds, to wait
for the server response.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
:type user_project: str
:param user_project: (Optional) This parameter is currently ignored.
"""
metadata = HMACKeyMetadata(self, access_id, project_id, user_project)
metadata.reload(timeout=timeout) # raises NotFound for missing key
return metadata
def generate_signed_post_policy_v4(
self,
bucket_name,
blob_name,
expiration,
conditions=None,
fields=None,
credentials=None,
virtual_hosted_style=False,
bucket_bound_hostname=None,
scheme=None,
service_account_email=None,
access_token=None,
):
"""Generate a V4 signed policy object.
.. note::
Assumes ``credentials`` implements the
:class:`google.auth.credentials.Signing` interface. Also assumes
``credentials`` has a ``service_account_email`` property which
identifies the credentials.
The generated policy object allows a user to upload objects with a POST request.
:type bucket_name: str
:param bucket_name: Bucket name.
:type blob_name: str
:param blob_name: Object name.
:type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
:param expiration: Policy expiration time.
:type conditions: list
:param conditions: (Optional) List of POST policy conditions, which are
used to restrict what is allowed in the request.
:type fields: dict
:param fields: (Optional) Additional elements to include into request.
:type credentials: :class:`google.auth.credentials.Signing`
:param credentials: (Optional) Credentials object with an associated private
key to sign text.
:type virtual_hosted_style: bool
:param virtual_hosted_style: (Optional) If True, construct the URL relative to the bucket
virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'.
:type bucket_bound_hostname: str
:param bucket_bound_hostname:
(Optional) If passed, construct the URL relative to the bucket-bound hostname.
Value can be bare or with a scheme, e.g., 'example.com' or 'http://example.com'.
See: https://cloud.google.com/storage/docs/request-endpoints#cname
:type scheme: str
:param scheme:
(Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use
this value as a scheme. ``https`` will work only when using a CDN.
Defaults to ``"http"``.
:type service_account_email: str
:param service_account_email: (Optional) E-mail address of the service account.
:type access_token: str
:param access_token: (Optional) Access token for a service account.
:rtype: dict
:returns: Signed POST policy.
Example:
Generate signed POST policy and upload a file.
>>> from google.cloud import storage
>>> client = storage.Client()
>>> policy = client.generate_signed_post_policy_v4(
"bucket-name",
"blob-name",
expiration=datetime.datetime(2020, 3, 17),
conditions=[
["content-length-range", 0, 255]
],
fields={
"x-goog-meta-hello": "world"
},
)
>>> with open("bucket-name", "rb") as f:
files = {"file": ("bucket-name", f)}
requests.post(policy["url"], data=policy["fields"], files=files)
"""
credentials = self._credentials if credentials is None else credentials
ensure_signed_credentials(credentials)
# prepare policy conditions and fields
timestamp, datestamp = get_v4_now_dtstamps()
x_goog_credential = "{email}/{datestamp}/auto/storage/goog4_request".format(
email=credentials.signer_email, datestamp=datestamp
)
required_conditions = [
{"bucket": bucket_name},
{"key": blob_name},
{"x-goog-date": timestamp},
{"x-goog-credential": x_goog_credential},
{"x-goog-algorithm": "GOOG4-RSA-SHA256"},
]
conditions = conditions or []
policy_fields = {}
for key, value in sorted((fields or {}).items()):
if not key.startswith("x-ignore-"):
policy_fields[key] = value
conditions.append({key: value})
conditions += required_conditions
# calculate policy expiration time
now = _NOW()
if expiration is None:
expiration = now + datetime.timedelta(hours=1)
policy_expires = now + datetime.timedelta(
seconds=get_expiration_seconds_v4(expiration)
)
# encode policy for signing
policy = json.dumps(
collections.OrderedDict(
sorted(
{
"conditions": conditions,
"expiration": policy_expires.isoformat() + "Z",
}.items()
)
),
separators=(",", ":"),
)
str_to_sign = base64.b64encode(policy.encode("utf-8"))
# sign the policy and get its cryptographic signature
if access_token and service_account_email:
signature = _sign_message(str_to_sign, access_token, service_account_email)
signature_bytes = base64.b64decode(signature)
else:
signature_bytes = credentials.sign_bytes(str_to_sign)
# get hexadecimal representation of the signature
signature = binascii.hexlify(signature_bytes).decode("utf-8")
policy_fields.update(
{
"key": blob_name,
"x-goog-algorithm": "GOOG4-RSA-SHA256",
"x-goog-credential": x_goog_credential,
"x-goog-date": timestamp,
"x-goog-signature": signature,
"policy": str_to_sign,
}
)
# designate URL
if virtual_hosted_style:
url = "https://{}.storage.googleapis.com/".format(bucket_name)
elif bucket_bound_hostname:
if ":" in bucket_bound_hostname: # URL includes scheme
url = bucket_bound_hostname
else: # scheme is given separately
url = "{scheme}://{host}/".format(
scheme=scheme, host=bucket_bound_hostname
)
else:
url = "https://storage.googleapis.com/{}/".format(bucket_name)
return {"url": url, "fields": policy_fields}
def _item_to_bucket(iterator, item):
"""Convert a JSON bucket to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a bucket.
:rtype: :class:`.Bucket`
:returns: The next bucket in the page.
"""
name = item.get("name")
bucket = Bucket(iterator.client, name)
bucket._set_properties(item)
return bucket
def _item_to_hmac_key_metadata(iterator, item):
"""Convert a JSON key metadata resource to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a key metadata instance.
:rtype: :class:`~google.cloud.storage.hmac_key.HMACKeyMetadata`
:returns: The next key metadata instance in the page.
"""
metadata = HMACKeyMetadata(iterator.client)
metadata._properties = item
return metadata
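# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Exercises the public surface defined above; the bucket name is hypothetical
# and the calls require network access and a publicly readable bucket.
def _demo_client_usage():
    client = Client.create_anonymous_client()
    bucket = client.bucket("some-public-bucket")  # hypothetical name
    for blob in client.list_blobs(bucket, max_results=5):
        print(blob.name)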
|
the-stack_0_26713
|
'''Print the supplementary part of the New GRE Vocabulary Upgrade book,
i.e. the words it contains that are not already covered by the Magoosh
GRE flashcards.
'''
from util.common import load_wordlist
magoosh_path = 'wordlists/magoosh-gre'
barron_path = 'wordlists/barron-800'
upgrade_path = 'wordlists/gre-upgrade'
magoosh_words = set(load_wordlist(magoosh_path))
upgrade_words = sorted([
(k, v) for k, v in load_wordlist(upgrade_path, detailed=True).items()
if k not in magoosh_words
])
# new gre upgrade
print('# New GRE Vocabulary Upgrade')
for word, struct in upgrade_words:
print('### {}'.format(word))
for k, meaning in enumerate(struct['meanings']):
print('{}. {} {}'.format(
'i' * (k + 1), meaning['part'], meaning['definition']
))
print()
if meaning['synonyms']:
print(' ' * 2 + 's. *{}*'.format(
', '.join(meaning['synonyms'])
))
print()
if meaning['antonyms']:
print(' ' * 2 + 'a. *{}*'.format(
', '.join(meaning['antonyms'])
))
print()
print()
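# Example of the Markdown this script emits (editor's sketch; the word, its
# part-of-speech formatting, definition, and synonyms are made up):
#
# ### abase
# i. (v.) to lower in rank or esteem
#
#   s. *humble, degrade*
#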
|
the-stack_0_26714
|
from mmcv.utils import collect_env as collect_basic_env
from mmcv.utils import get_git_hash
import mmaction
def collect_env():
env_info = collect_basic_env()
env_info['MMAction2'] = (
mmaction.__version__ + '+' + get_git_hash(digits=7))
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print('{}: {}'.format(name, val))
|
the-stack_0_26715
|
from __future__ import absolute_import
import time
import logging
from collections import namedtuple
from itertools import takewhile
import email
import re
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
InvalidHeader,
)
import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
"status", "redirect_location"])
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
Set to a ``False`` value to retry on any verb.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``method_whitelist``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
history=None, respect_retry_after_header=True,
remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST):
self.total = total
self.connect = connect
self.read = read
self.status = status
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = remove_headers_on_redirect
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
reversed(self.history))))
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
retry_date = time.mktime(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
""" Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
""" Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist.
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
""" Is this method/status code retryable? (Based on whitelists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (self.total and self.respect_retry_after_header and
has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
cause = 'unknown'
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
# status_forcelist and the given method is in the whitelist
cause = ResponseError.GENERIC_ERROR
if response and response.status:
if status_count is not None:
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
status = response.status
history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect, status=status_count,
history=history)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect}, status={self.status})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
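# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Demonstrates the backoff formula documented above,
# backoff_factor * (2 ** (consecutive_errors - 1)); the URL is hypothetical.
def _demo_backoff():
    retry = Retry(total=5, backoff_factor=0.1)
    for attempt in range(1, 5):
        retry = retry.increment(
            method="GET",
            url="http://example.com/",
            error=ConnectTimeoutError("simulated connect timeout"),
        )
        # First retry sleeps 0, then 0.2s, 0.4s, 0.8s, capped at BACKOFF_MAX.
        print(attempt, retry.get_backoff_time())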
|
the-stack_0_26716
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('project', '0004_auto_20160308_0246'),
]
operations = [
migrations.AddField(
model_name='reservation',
name='map',
field=models.CharField(default='city', max_length=100),
preserve_default=False,
),
]
|
the-stack_0_26717
|
import torch
class Trader:
def __init__(self, initial_capital, stakes=0):
self.initial_capital = initial_capital
self.equity = self.initial_capital
self.portfolio = stakes
self.portfolio_value = [self.initial_capital]
def buy(self, conf, price):
units = int(conf * self.equity / price)
if units <= 0:
return
self.equity -= price * units
self.portfolio += units
self.portfolio_value.append(self.equity + self.portfolio * price)
def sell(self, conf, price):
units = int(conf * self.portfolio)
if units <= 0:
return
self.equity += price * units
self.portfolio -= units
self.portfolio_value.append(self.equity + self.portfolio * price)
def compute_returns(self, price):
self.sell(1, price)
returns = []
prev_value = self.portfolio_value[0]
for value in self.portfolio_value[1:]:
returns.append(value / prev_value)
prev_value = value
return returns
def trade(self, signal, price):
# Buy = 2, Hold = 1, Sell = 0
buy_conf = (signal == 2).sum()
sell_conf = (signal == 0).sum()
if buy_conf > sell_conf:
self.buy(buy_conf / len(signal), price)
elif sell_conf > buy_conf:
self.sell(sell_conf / len(signal), price)
else:
self.portfolio_value.append(self.equity + self.portfolio * price)
class ParallelTrader:
def __init__(self, traders):
self.traders = traders
def trade(self, signal, price):
for trader in self.traders:
trader.trade(signal, price)
def compute_returns(self, price):
returns = []
for trader in self.traders:
returns.append(trader.compute_returns(price))
return returns
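# --- Illustrative usage (editor's sketch, not part of the original module) ---
# "signal" mimics an ensemble of per-model votes (Buy=2, Hold=1, Sell=0) as a
# torch tensor; the prices and votes below are made up for illustration.
def _demo_trader():
    trader = Trader(initial_capital=1000.0)
    prices = [10.0, 11.0, 9.5, 12.0]
    signals = [
        torch.tensor([2, 2, 1]),  # majority buy
        torch.tensor([1, 1, 1]),  # hold
        torch.tensor([2, 0, 2]),  # buy again
        torch.tensor([0, 0, 1]),  # majority sell
    ]
    for signal, price in zip(signals, prices):
        trader.trade(signal, price)
    return trader.compute_returns(prices[-1])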
|
the-stack_0_26718
|
# coding: utf-8
import logging
import os.path
from io import StringIO
from edd import TestCase
from .form_utils import (
extract_floats_from_form,
extract_integers_from_form,
extract_non_blank_string_from_form,
)
from .parsers import biolector, gc_ms, skyline
from .parsers.excel import export_to_xlsx, import_xlsx_table, import_xlsx_tables
test_dir = os.path.join(os.path.dirname(__file__), "fixtures", "misc_data")
logger = logging.getLogger(__name__)
########################################################################
# GC-MS
class GCMSTests(TestCase):
def test_1(self):
test_file = os.path.join(test_dir, "gc_ms_1.txt")
result = gc_ms.run([test_file], out=StringIO(), err=StringIO())
self.assertEqual(len(result.samples), 102)
err = StringIO()
out = StringIO()
result.show_peak_areas(out=out, err=err)
self.assertIn("0059.D 562 None None", out.getvalue())
self.assertIn("0062.D 104049 1192526 35926", out.getvalue())
self.assertIn("WARNING: 2 peaks near 8.092 for sample 0062.D", err.getvalue())
self.assertEqual(err.getvalue().count("WARNING"), 44)
err = StringIO()
out = StringIO()
result.show_peak_areas_csv(out=out, err=err)
self.assertIn("0059.D,562,None,None", out.getvalue())
self.assertIn("0062.D,104049,1192526,35926", out.getvalue())
def test_2(self):
# a slightly different format
test_file = os.path.join(test_dir, "gc_ms_2.txt")
with open(os.path.join(test_dir, "gc_ms_2.out.txt")) as f:
test_out = f.read()
result = gc_ms.run([test_file], out=StringIO(), err=StringIO())
self.assertEqual(len(result.samples), 5)
err = StringIO()
out = StringIO()
result.show_peak_areas(out=out, err=err)
self.assertEqual(out.getvalue(), test_out)
# Fault tolerance
test_file = os.path.join(test_dir, "skyline.csv")
with self.assertRaises(ValueError):
result = gc_ms.run([test_file], out=StringIO(), err=StringIO())
def test_xls_key(self):
# Import .xlsx workbook
test_file = os.path.join(test_dir, "sample_gc_ms_key.xlsx")
with open(test_file, "rb") as file:
headers, table = gc_ms.import_xlsx_metadata(file)
self.assertEqual(
headers,
[
"sample ID (could be vial #)",
"label to display",
"parent strain",
"plasmid/change",
"colony number",
"time point",
"media (induction, etc.)",
"sample type",
None,
"user field 1",
"user field 2",
"user field 3",
],
)
class SkylineTests(TestCase):
def test_1(self):
file_name = os.path.join(test_dir, "skyline.csv")
parser = skyline.SkylineParser()
with open(file_name, "r") as file:
result = parser.export(file)
self.assertIn(skyline.Record("4", "A", 22), result["rows"])
class BiolectorTests(TestCase):
def test_simple(self):
filename = "/code/edd_utils/parsers/biolector/biolector_test_file.xml"
with open(filename, "r") as file:
results = biolector.getRawImportRecordsAsJSON(file, 0)
self.assertEqual(len(results), 48)
last_v = results[-1]["data"][-1][1]
self.assertEqual(last_v, "8.829")
well_v = results[20]["metadata_by_name"]["Bio:well"]
self.assertEqual(well_v, "C05")
def get_table():
return [
["Some random text we want to ignore", None, None, None, None, None],
["More random", 2.5, None, None, None, None],
[None, None, None, None, None, None],
[None, None, None, None, None, None],
[None, "sample ID", "line ID", "replica", "molecule1", "molecule 2"],
[None, "abcd1", "line1", 1, 5.5, 6.5],
[None, "abcd2", "line1", 2, 4.0, 7.3],
[None, "abcd3", "line2", 1, 3.5, 8.8],
[None, "abcd4", "line2", 2, 2.0, 9.6],
[None, None, None, None, None, None],
["Summary line", None, None, None, 3.75, 8.05],
[None, None, None, None, None, None],
]
def make_simple(t, file_name):
return export_to_xlsx(t, file_name=file_name, title=file_name)
class ExcelTests(TestCase):
def test_simple(self):
make_simple(get_table(), "tst1.xlsx")
result = import_xlsx_tables("tst1.xlsx")
t = result["worksheets"][0][0]
self.assertEqual(
t["headers"], ["sample ID", "line ID", "replica", "molecule1", "molecule 2"]
)
self.assertEqual(
t["values"],
[
["abcd1", "line1", 1, 5.5, 6.5],
["abcd2", "line1", 2, 4, 7.3],
["abcd3", "line2", 1, 3.5, 8.8],
["abcd4", "line2", 2, 2, 9.6],
],
)
result2 = import_xlsx_tables(
"tst1.xlsx", worksheet_name="tst1.xlsx", column_search_text="sample"
)
t2 = result2["worksheets"][0][0]
self.assertEqual(t2, t)
# note different function
result3 = import_xlsx_table("tst1.xlsx")
self.assertEqual(result3, t)
result4 = import_xlsx_table(
"tst1.xlsx", column_labels=["sample id", "molecule1", "MOLECULE 2"]
)
self.assertEqual(
result4,
{
"headers": ["sample ID", "molecule1", "molecule 2"],
"values": [
["abcd1", 5.5, 6.5],
["abcd2", 4, 7.3],
["abcd3", 3.5, 8.8],
["abcd4", 2, 9.6],
],
},
)
os.remove("tst1.xlsx")
def test_error_handling(self):
t3 = get_table()
t3[7][1] = None
make_simple(t3, "tst3.xlsx")
result = import_xlsx_tables("tst3.xlsx")
self.assertEqual(
result,
{
"worksheets": [
[
{
"headers": [
"sample ID",
"line ID",
"replica",
"molecule1",
"molecule 2",
],
"values": [
["abcd1", "line1", 1, 5.5, 6.5],
["abcd2", "line1", 2, 4, 7.3],
[None, "line2", 1, 3.5, 8.8],
["abcd4", "line2", 2, 2, 9.6],
],
}
]
]
},
)
# ask for missing worksheet
with self.assertRaises(KeyError):
import_xlsx_table("tst3.xlsx", worksheet_name="foo")
os.remove("tst3.xlsx")
def test_non_numeric(self):
get_table()
class UtilsTests(TestCase):
def test_form_handling(self):
form = {
"int1": "1",
"float1": "2.5",
"int2": "1.5",
"float2": "2",
"int3": ["1", "2", "3"],
"float3": ["1.5"],
"int4": ["1.5", "2", "3"],
"float4": "",
"str1": "foo",
"str2": ["foo", "bar"],
"str3": "",
}
self.assertEqual(extract_integers_from_form(form, "int1"), 1)
self.assertEqual(extract_floats_from_form(form, "int1"), 1.0)
self.assertEqual(
extract_integers_from_form(form, "int3", allow_list=True), [1, 2, 3]
)
self.assertEqual(
extract_floats_from_form(form, "float3", allow_list=True), [1.5]
)
self.assertEqual(extract_non_blank_string_from_form(form, "str1"), "foo")
self.assertEqual(
extract_non_blank_string_from_form(form, "str2", allow_list=True),
["foo", "bar"],
)
with self.assertRaises(TypeError):
extract_integers_from_form(form, "int3")
with self.assertRaises(ValueError):
extract_integers_from_form(form, "int2")
with self.assertRaises(KeyError):
extract_integers_from_form(form, "int5")
with self.assertRaises(ValueError):
extract_integers_from_form(form, "int4", allow_list=True)
with self.assertRaises(ValueError):
extract_non_blank_string_from_form(form, "str3")
self.assertIsNone(
extract_non_blank_string_from_form(
form, "str3", return_none_if_missing=True
)
)
self.assertIsNone(
extract_floats_from_form(form, "float4", return_none_if_missing=True)
)
|
the-stack_0_26721
|
# Example: connect and reset. Should show SYN, SYN/ACK, then RST on the wire.
from trex.astf.api import *
# we can send either Python bytes type as below:
http_req = b'GET /3384 HTTP/1.1\r\nHost: 22.0.0.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n'
# or we can send Python string containing ascii chars, as below:
http_response = 'HTTP/1.1 200 OK\r\nServer: Microsoft-IIS/6.0\r\nContent-Type: text/html\r\nContent-Length: 32000\r\n\r\n<html><pre>**********</pre></html>'
class Prof1():
def __init__(self):
pass # tunables
def create_profile(self):
# client commands
prog_c = ASTFProgram()
prog_c.connect()  # connect
prog_c.reset()  # send RST from client side
prog_s = ASTFProgram()
prog_s.wait_for_peer_close()  # wait for client to close the socket
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
# template
temp_c = ASTFTCPClientTemplate(program=prog_c, ip_gen=ip_gen)
temp_s = ASTFTCPServerTemplate(program=prog_s) # using default association
template = ASTFTemplate(client_template=temp_c, server_template=temp_s)
# profile
profile = ASTFProfile(default_ip_gen=ip_gen, templates=template)
return profile
def get_profile(self,**kwargs):
return self.create_profile()
def register():
return Prof1()
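# --- Illustrative usage (editor's sketch, not part of the original profile) ---
# TRex normally loads this file and calls register() itself; the helper below
# only shows that the profile object can also be built standalone.
def _demo_build_profile():
    return register().get_profile()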
|
the-stack_0_26723
|
import numpy as np
import scipy.io.wavfile as wf
import dtcwpt
class dtcwpt_wav(object):
def __init__(self,path,n_levels=3,dtype=np.dtype('complex64')):
self.path = path
self.n_levels = n_levels
self.dtype = dtype
self.real_dtype = dtype.type().real.dtype
signal_sample_rate,signal = wf.read(path, mmap=True)
self.signal_sample_rate = signal_sample_rate
self.signal_channels = signal.shape[1]
self.signal = signal
@property
def shape(self):
return (self.signal_length, self.sample_rate, self.signal_channels)
@property
def sample_rate(self):
return 2**self.n_levels
@property
def signal_length(self):
return len(self.signal)
@property
def signal_padding(self):
lpad = self.sample_rate//2
rpad = (self.signal_length-self.signal_length%self.sample_rate)%self.sample_rate
return (lpad, rpad)
def __len__(self):
return (self.signal_length+sum(self.signal_padding))//self.sample_rate
def __getitem__(self, key):
start, stop, stride = None, None, 1
if hasattr(key,'__index__'):
start = key.__index__()
stop = start + 1
elif isinstance(key,(slice)):
start, stop, stride = key.indices(len(self))
if stride != 1:
raise IndexError("stride must be 1")
if not isinstance(start, (int)):
raise TypeError("indices must be integers")
n_timeseries = stop-start
n_samples = self.sample_rate*n_timeseries
(lpad, rpad) = self.signal_padding
lpad = lpad if (start == 0) else 0
rpad = rpad if (stop == len(self)) else 0
signal_start = start*self.sample_rate-self.sample_rate//2+lpad
signal_stop = stop*self.sample_rate-self.sample_rate//2-rpad
chunk = self.signal[signal_start:signal_stop]
chunk = np.pad(chunk.astype(self.real_dtype),((lpad,rpad),(0,0)))
packets = np.ndarray((self.signal_channels,n_timeseries,self.sample_rate),dtype=self.dtype)
for channel in range(self.signal_channels):
chunk_channel = chunk.T[channel][np.newaxis].T
packets[channel] = dtcwpt.forward(chunk_channel,self.n_levels)
#if not isinstance(key,(slice)):
# return packets[:,0]
return np.transpose(packets,(1,2,0))
#return packets.T
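# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The WAV path is hypothetical; slicing returns an array of dual-tree complex
# wavelet packet coefficients shaped (time steps, sample_rate, channels).
def _demo_dtcwpt_wav():
    wav = dtcwpt_wav("example.wav", n_levels=3)  # hypothetical file
    print(len(wav), wav.shape)
    block = wav[0:4]
    print(block.shape)  # (4, 2 ** n_levels, channels)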
|
the-stack_0_26724
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import signal
import sys
import threading
import time
import unittest
import ray.local_scheduler as local_scheduler
import ray.plasma as plasma
USE_VALGRIND = False
ID_SIZE = 20
NIL_WORKER_ID = 20 * b"\xff"
NIL_ACTOR_ID = 20 * b"\xff"
def random_object_id():
return local_scheduler.ObjectID(np.random.bytes(ID_SIZE))
def random_driver_id():
return local_scheduler.ObjectID(np.random.bytes(ID_SIZE))
def random_task_id():
return local_scheduler.ObjectID(np.random.bytes(ID_SIZE))
def random_function_id():
return local_scheduler.ObjectID(np.random.bytes(ID_SIZE))
class TestLocalSchedulerClient(unittest.TestCase):
def setUp(self):
# Start Plasma store.
plasma_store_name, self.p1 = plasma.start_plasma_store()
self.plasma_client = plasma.PlasmaClient(plasma_store_name,
release_delay=0)
# Start a local scheduler.
scheduler_name, self.p2 = local_scheduler.start_local_scheduler(
plasma_store_name, use_valgrind=USE_VALGRIND)
# Connect to the scheduler.
self.local_scheduler_client = local_scheduler.LocalSchedulerClient(
scheduler_name, NIL_WORKER_ID, NIL_ACTOR_ID, False, 0)
def tearDown(self):
# Check that the processes are still alive.
self.assertEqual(self.p1.poll(), None)
self.assertEqual(self.p2.poll(), None)
# Kill Plasma.
self.p1.kill()
# Kill the local scheduler.
if USE_VALGRIND:
self.p2.send_signal(signal.SIGTERM)
self.p2.wait()
if self.p2.returncode != 0:
os._exit(-1)
else:
self.p2.kill()
def test_submit_and_get_task(self):
function_id = random_function_id()
object_ids = [random_object_id() for i in range(256)]
# Create and seal the objects in the object store so that we can schedule
# all of the subsequent tasks.
for object_id in object_ids:
self.plasma_client.create(object_id.id(), 0)
self.plasma_client.seal(object_id.id())
# Define some arguments to use for the tasks.
args_list = [
[],
[{}],
[()],
1 * [1],
10 * [1],
100 * [1],
1000 * [1],
1 * ["a"],
10 * ["a"],
100 * ["a"],
1000 * ["a"],
[1, 1.3, 1 << 100, "hi", u"hi", [1, 2]],
object_ids[:1],
object_ids[:2],
object_ids[:3],
object_ids[:4],
object_ids[:5],
object_ids[:10],
object_ids[:100],
object_ids[:256],
[1, object_ids[0]],
[object_ids[0], "a"],
[1, object_ids[0], "a"],
[object_ids[0], 1, object_ids[1], "a"],
object_ids[:3] + [1, "hi", 2.3] + object_ids[:5],
object_ids + 100 * ["a"] + object_ids
]
for args in args_list:
for num_return_vals in [0, 1, 2, 3, 5, 10, 100]:
task = local_scheduler.Task(random_driver_id(), function_id, args,
num_return_vals, random_task_id(), 0)
# Submit a task.
self.local_scheduler_client.submit(task)
# Get the task.
new_task = self.local_scheduler_client.get_task()
self.assertEqual(task.function_id().id(), new_task.function_id().id())
retrieved_args = new_task.arguments()
returns = new_task.returns()
self.assertEqual(len(args), len(retrieved_args))
self.assertEqual(num_return_vals, len(returns))
for i in range(len(retrieved_args)):
if isinstance(args[i], local_scheduler.ObjectID):
self.assertEqual(args[i].id(), retrieved_args[i].id())
else:
self.assertEqual(args[i], retrieved_args[i])
# Submit all of the tasks.
for args in args_list:
for num_return_vals in [0, 1, 2, 3, 5, 10, 100]:
task = local_scheduler.Task(random_driver_id(), function_id, args,
num_return_vals, random_task_id(), 0)
self.local_scheduler_client.submit(task)
# Get all of the tasks.
for args in args_list:
for num_return_vals in [0, 1, 2, 3, 5, 10, 100]:
new_task = self.local_scheduler_client.get_task()
def test_scheduling_when_objects_ready(self):
# Create a task and submit it.
object_id = random_object_id()
task = local_scheduler.Task(random_driver_id(), random_function_id(),
[object_id], 0, random_task_id(), 0)
self.local_scheduler_client.submit(task)
# Launch a thread to get the task.
def get_task():
self.local_scheduler_client.get_task()
t = threading.Thread(target=get_task)
t.start()
# Sleep to give the thread time to call get_task.
time.sleep(0.1)
# Create and seal the object ID in the object store. This should trigger a
# scheduling event.
self.plasma_client.create(object_id.id(), 0)
self.plasma_client.seal(object_id.id())
# Wait until the thread finishes so that we know the task was scheduled.
t.join()
def test_scheduling_when_objects_evicted(self):
# Create a task with two dependencies and submit it.
object_id1 = random_object_id()
object_id2 = random_object_id()
task = local_scheduler.Task(random_driver_id(), random_function_id(),
[object_id1, object_id2], 0, random_task_id(),
0)
self.local_scheduler_client.submit(task)
# Launch a thread to get the task.
def get_task():
self.local_scheduler_client.get_task()
t = threading.Thread(target=get_task)
t.start()
# Make one of the dependencies available.
buf = self.plasma_client.create(object_id1.id(), 1)
self.plasma_client.seal(object_id1.id())
# Release the object.
del buf
# Check that the thread is still waiting for a task.
time.sleep(0.1)
self.assertTrue(t.is_alive())
# Force eviction of the first dependency.
self.plasma_client.evict(plasma.DEFAULT_PLASMA_STORE_MEMORY)
# Check that the thread is still waiting for a task.
time.sleep(0.1)
self.assertTrue(t.is_alive())
# Check that the first object dependency was evicted.
object1 = self.plasma_client.get([object_id1.id()], timeout_ms=0)
self.assertEqual(object1, [None])
# Check that the thread is still waiting for a task.
time.sleep(0.1)
self.assertTrue(t.is_alive())
# Create the second dependency.
self.plasma_client.create(object_id2.id(), 1)
self.plasma_client.seal(object_id2.id())
# Check that the thread is still waiting for a task.
time.sleep(0.1)
self.assertTrue(t.is_alive())
# Create the first dependency again. Both dependencies are now available.
self.plasma_client.create(object_id1.id(), 1)
self.plasma_client.seal(object_id1.id())
# Wait until the thread finishes so that we know the task was scheduled.
t.join()
if __name__ == "__main__":
if len(sys.argv) > 1:
# Pop the argument so we don't mess with unittest's own argument parser.
if sys.argv[-1] == "valgrind":
arg = sys.argv.pop()
USE_VALGRIND = True
print("Using valgrind for tests")
unittest.main(verbosity=2)
|
the-stack_0_26725
|
# -*- coding: utf-8 -*-
# $Id: valueunit.py 69111 2017-10-17 14:26:02Z vboxsync $
"""
Test Value Unit Definititions.
This must correspond 1:1 with include/iprt/test.h and
include/VBox/VMMDevTesting.h.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 69111 $"
## @name Unit constants.
## Used everywhere.
## @note Using upper case here so we can copy, paste and chop from the other
# headers.
## @{
PCT = 0x01;
BYTES = 0x02;
BYTES_PER_SEC = 0x03;
KILOBYTES = 0x04;
KILOBYTES_PER_SEC = 0x05;
MEGABYTES = 0x06;
MEGABYTES_PER_SEC = 0x07;
PACKETS = 0x08;
PACKETS_PER_SEC = 0x09;
FRAMES = 0x0a;
FRAMES_PER_SEC = 0x0b;
OCCURRENCES = 0x0c;
OCCURRENCES_PER_SEC = 0x0d;
CALLS = 0x0e;
CALLS_PER_SEC = 0x0f;
ROUND_TRIP = 0x10;
SECS = 0x11;
MS = 0x12;
NS = 0x13;
NS_PER_CALL = 0x14;
NS_PER_FRAME = 0x15;
NS_PER_OCCURRENCE = 0x16;
NS_PER_PACKET = 0x17;
NS_PER_ROUND_TRIP = 0x18;
INSTRS = 0x19;
INSTRS_PER_SEC = 0x1a;
NONE = 0x1b;
PP1K = 0x1c;
PP10K = 0x1d;
PPM = 0x1e;
PPB = 0x1f;
END = 0x20;
## @}
## Translate constant to string.
g_asNames = \
[
'invalid', # 0
'%',
'bytes',
'bytes/s',
'KiB',
'KiB/s',
'MiB',
'MiB/s',
'packets',
'packets/s',
'frames',
'frames/s',
'occurrences',
'occurrences/s',
'calls',
'calls/s',
'roundtrips',
's',
'ms',
'ns',
'ns/call',
'ns/frame',
'ns/occurrences',
'ns/packet',
'ns/roundtrips',
'ins',
'ins/s',
'', # none
'pp1k',
'pp10k',
'ppm',
'ppb',
];
assert g_asNames[PP1K] == 'pp1k';
## Translation table for XML -> number.
g_kdNameToConst = \
{
'KB': KILOBYTES,
'KB/s': KILOBYTES_PER_SEC,
'MB': MEGABYTES,
'MB/s': MEGABYTES_PER_SEC,
'occurrences': OCCURRENCES,
'occurrences/s': OCCURRENCES_PER_SEC,
};
for i in range(1, len(g_asNames)):
g_kdNameToConst[g_asNames[i]] = i;
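## Small usage sketch added for illustration; it only exercises the lookup
## tables defined above and makes no assumption beyond this file.
if __name__ == '__main__':
    assert g_kdNameToConst['KiB'] == KILOBYTES;
    assert g_asNames[MS] == 'ms';
    print('valueunit lookup tables are consistent');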
|
the-stack_0_26728
|
from common.api_config import CommonApiConfig
from util.config import Config, ConfigField
class PluginsConfig(Config):
backends_dir = ConfigField(type=str, required=True, default='plugins/backends')
class MasterConfig(Config):
api = CommonApiConfig(common_logger='dedalus.master.api.common',
access_logger='dedalus.master.api.access',
port=8080)
backend = ConfigField(type=str, required=True, default='leveldb')
backend_config = ConfigField(type=dict, required=True, default=dict())
plugins = PluginsConfig()
|
the-stack_0_26730
|
# Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Miscellaneous utilities."""
__all__ = [
'collapse_rfc2231_value',
'decode_params',
'decode_rfc2231',
'encode_rfc2231',
'formataddr',
'formatdate',
'format_datetime',
'getaddresses',
'make_msgid',
'mktime_tz',
'parseaddr',
'parsedate',
'parsedate_tz',
'parsedate_to_datetime',
'unquote',
]
import os
import re
import time
import random
import socket
import datetime
import urllib.parse
from email._parseaddr import quote
from email._parseaddr import AddressList as _AddressList
from email._parseaddr import mktime_tz
from email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
# Intrapackage imports
from email.charset import Charset
COMMASPACE = ', '
EMPTYSTRING = ''
UEMPTYSTRING = ''
CRLF = '\r\n'
TICK = "'"
specialsre = re.compile(r'[][\\()<>@,:;".]')
escapesre = re.compile(r'[\\"]')
def _has_surrogates(s):
"""Return True if s contains surrogate-escaped binary data."""
# This check is based on the fact that unless there are surrogates, utf8
# (Python's default encoding) can encode any string. This is the fastest
# way to check for surrogates, see issue 11454 for timings.
try:
s.encode()
return False
except UnicodeEncodeError:
return True
# How to deal with a string containing bytes before handing it to the
# application through the 'normal' interface.
def _sanitize(string):
# Turn any escaped bytes into unicode 'unknown' char. If the escaped
# bytes happen to be utf-8 they will instead get decoded, even if they
# were invalid in the charset the source was supposed to be in. This
# seems like it is not a bad thing; a defect was still registered.
original_bytes = string.encode('utf-8', 'surrogateescape')
return original_bytes.decode('utf-8', 'replace')
# Helpers
def formataddr(pair, charset='utf-8'):
"""The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
The optional charset is the character set that is used to encode
realname in case realname is not ASCII safe. Can be an instance of str or
a Charset-like object which has a header_encode method. Default is
'utf-8'.
"""
name, address = pair
# The address MUST (per RFC) be ascii, so raise a UnicodeError if it isn't.
address.encode('ascii')
if name:
try:
name.encode('ascii')
except UnicodeEncodeError:
if isinstance(charset, str):
charset = Charset(charset)
encoded_name = charset.header_encode(name)
return "%s <%s>" % (encoded_name, address)
else:
quotes = ''
if specialsre.search(name):
quotes = '"'
name = escapesre.sub(r'\\\g<0>', name)
return '%s%s%s <%s>' % (quotes, name, quotes, address)
return address
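# Illustrative examples (added for clarity; these mirror the docstring above):
#   formataddr(('Jane Doe', 'jane@example.org'))   -> 'Jane Doe <jane@example.org>'
#   formataddr(('Doe, Jane', 'jane@example.org'))  -> '"Doe, Jane" <jane@example.org>'
#   formataddr(('', 'jane@example.org'))           -> 'jane@example.org'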
def getaddresses(fieldvalues):
"""Return a list of (REALNAME, EMAIL) for each fieldvalue."""
all = COMMASPACE.join(fieldvalues)
a = _AddressList(all)
return a.addresslist
def _format_timetuple_and_zone(timetuple, zone):
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
timetuple[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
timetuple[0], timetuple[3], timetuple[4], timetuple[5],
zone)
def formatdate(timeval=None, localtime=False, usegmt=False):
"""Returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Optional timeval if given is a floating point time value as accepted by
gmtime() and localtime(), otherwise the current time is used.
Optional localtime is a flag that when True, interprets timeval, and
returns a date relative to the local timezone instead of UTC, properly
taking daylight savings time into account.
Optional argument usegmt means that the timezone is written out as
an ascii string, not numeric one (so "GMT" instead of "+0000"). This
is needed for HTTP, and is only used when localtime==False.
"""
# Note: we cannot use strftime() because that honors the locale and RFC
# 2822 requires that day and month names be the English abbreviations.
if timeval is None:
timeval = time.time()
if localtime or usegmt:
dt = datetime.datetime.fromtimestamp(timeval, datetime.timezone.utc)
else:
dt = datetime.datetime.utcfromtimestamp(timeval)
if localtime:
dt = dt.astimezone()
usegmt = False
return format_datetime(dt, usegmt)
def format_datetime(dt, usegmt=False):
"""Turn a datetime into a date string as specified in RFC 2822.
If usegmt is True, dt must be an aware datetime with an offset of zero. In
this case 'GMT' will be rendered instead of the normal +0000 required by
RFC2822. This is to support HTTP headers involving date stamps.
"""
now = dt.timetuple()
if usegmt:
if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
raise ValueError("usegmt option requires a UTC datetime")
zone = 'GMT'
elif dt.tzinfo is None:
zone = '-0000'
else:
zone = dt.strftime("%z")
return _format_timetuple_and_zone(now, zone)
def make_msgid(idstring=None, domain=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<142480216486.20800.16526388040877946887@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id. Optional domain if given provides the
portion of the message id after the '@'. It defaults to the locally
defined hostname.
"""
timeval = int(time.time()*100)
pid = os.getpid()
randint = random.getrandbits(64)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
if domain is None:
domain = socket.getfqdn()
msgid = '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, domain)
return msgid
def parsedate_to_datetime(data):
*dtuple, tz = _parsedate_tz(data)
if tz is None:
return datetime.datetime(*dtuple[:6])
return datetime.datetime(*dtuple[:6],
tzinfo=datetime.timezone(datetime.timedelta(seconds=tz)))
def parseaddr(addr):
"""
Parse addr into its constituent realname and email address parts.
Return a tuple of realname and email address, unless the parse fails, in
which case return a 2-tuple of ('', '').
"""
addrs = _AddressList(addr).addresslist
if not addrs:
return '', ''
return addrs[0]
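# Illustrative examples (added for clarity):
#   parseaddr('Jane Doe <jane@example.org>') -> ('Jane Doe', 'jane@example.org')
#   parseaddr('jane@example.org')            -> ('', 'jane@example.org')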
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str
# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
"""Decode string according to RFC 2231"""
parts = s.split(TICK, 2)
if len(parts) <= 2:
return None, None, s
return parts
def encode_rfc2231(s, charset=None, language=None):
"""Encode string according to RFC 2231.
If neither charset nor language is given, then s is returned as-is. If
charset is given but not language, the string is encoded using the empty
string for language.
"""
s = urllib.parse.quote(s, safe='', encoding=charset or 'ascii')
if charset is None and language is None:
return s
if language is None:
language = ''
return "%s'%s'%s" % (charset, language, s)
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$',
re.ASCII)
def decode_params(params):
"""Decode parameters list according to RFC 2231.
params is a sequence of 2-tuples containing (param name, string value).
"""
new_params = [params[0]]
# Map parameter's name to a list of continuations. The values are a
# 3-tuple of the continuation number, the string value, and a flag
# specifying whether a particular segment is %-encoded.
rfc2231_params = {}
for name, value in params[1:]:
encoded = name.endswith('*')
value = unquote(value)
mo = rfc2231_continuation.match(name)
if mo:
name, num = mo.group('name', 'num')
if num is not None:
num = int(num)
rfc2231_params.setdefault(name, []).append((num, value, encoded))
else:
new_params.append((name, '"%s"' % quote(value)))
if rfc2231_params:
for name, continuations in rfc2231_params.items():
value = []
extended = False
# Sort by number
continuations.sort()
# And now append all values in numerical order, converting
# %-encodings for the encoded segments. If any of the
# continuation names ends in a *, then the entire string, after
# decoding segments and concatenating, must have the charset and
# language specifiers at the beginning of the string.
for num, s, encoded in continuations:
if encoded:
# Decode as "latin-1", so the characters in s directly
# represent the percent-encoded octet values.
# collapse_rfc2231_value treats this as an octet sequence.
s = urllib.parse.unquote(s, encoding="latin-1")
extended = True
value.append(s)
value = quote(EMPTYSTRING.join(value))
if extended:
charset, language, value = decode_rfc2231(value)
new_params.append((name, (charset, language, '"%s"' % value)))
else:
new_params.append((name, '"%s"' % value))
return new_params
def collapse_rfc2231_value(value, errors='replace',
fallback_charset='us-ascii'):
if not isinstance(value, tuple) or len(value) != 3:
return unquote(value)
# While value comes to us as a unicode string, we need it to be a bytes
# object. We do not want bytes() normal utf-8 decoder, we want a straight
# interpretation of the string as character bytes.
charset, language, text = value
if charset is None:
# Issue 17369: if charset/lang is None, decode_rfc2231 couldn't parse
# the value, so use the fallback_charset.
charset = fallback_charset
rawbytes = bytes(text, 'raw-unicode-escape')
try:
return str(rawbytes, charset, errors)
except LookupError:
# charset is not a known codec.
return unquote(text)
#
# datetime doesn't provide a localtime function yet, so provide one. Code
# adapted from the patch in issue 9527. This may not be perfect, but it is
# better than not having it.
#
def localtime(dt=None, isdst=-1):
"""Return local time as an aware datetime object.
If called without arguments, return current time. Otherwise *dt*
argument should be a datetime instance, and it is converted to the
local time zone according to the system time zone database. If *dt* is
naive (that is, dt.tzinfo is None), it is assumed to be in local time.
In this case, a positive or zero value for *isdst* causes localtime to
presume initially that summer time (for example, Daylight Saving Time)
is or is not (respectively) in effect for the specified time. A
negative value for *isdst* causes the localtime() function to attempt
to divine whether summer time is in effect for the specified time.
"""
if dt is None:
return datetime.datetime.now(datetime.timezone.utc).astimezone()
if dt.tzinfo is not None:
return dt.astimezone()
# We have a naive datetime. Convert to a (localtime) timetuple and pass to
# system mktime together with the isdst hint. System mktime will return
# seconds since epoch.
tm = dt.timetuple()[:-1] + (isdst,)
seconds = time.mktime(tm)
localtm = time.localtime(seconds)
try:
delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
tz = datetime.timezone(delta, localtm.tm_zone)
except AttributeError:
# Compute UTC offset and compare with the value implied by tm_isdst.
# If the values match, use the zone name implied by tm_isdst.
delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
dst = time.daylight and localtm.tm_isdst > 0
gmtoff = -(time.altzone if dst else time.timezone)
if delta == datetime.timedelta(seconds=gmtoff):
tz = datetime.timezone(delta, time.tzname[dst])
else:
tz = datetime.timezone(delta)
return dt.replace(tzinfo=tz)
|
the-stack_0_26731
|
#!Measurement
'''
baseline:
after: true
before: false
counts: 80
detector: H1
mass: 34.2
settling_time: 15.0
default_fits: average
equilibration:
eqtime: 1.0
inlet: R
inlet_delay: 3
outlet: O
use_extraction_eqtime: true
multicollect:
counts: 20
detector: H1
isotope: Ar40
peakcenter:
after: false
before: false
detector: H1
detectors:
- H1
- AX
- CDD
integration_time: 0.262144
isotope: Ar40
peakhop:
generate_ic_table: false
hops_name: ''
ncycles: 0
use_peak_hop: false
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2','CDD')
def main():
info('unknown measurement script')
activate_detectors(*ACTIVE_DETECTORS)
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
#sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
set_integration_time(1)
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
set_time_zero()
sniff(eqt)
set_fits()
set_baseline_fits()
#multicollect on active detectors
multicollect(ncounts=mx.multicollect.counts)
if mx.baseline.after:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time,
integration_time=8)
if mx.peakcenter.after:
activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope,
integration_time=mx.peakcenter.integration_time)
if use_cdd_warming:
gosub('warm_cdd', argv=(mx.equilibration.outlet,))
info('finished measure script')
|
the-stack_0_26732
|
from bs4 import BeautifulSoup
from flask import Flask, render_template, request
import requests
AF_URL = 'https://www.abercrombie.com/webapp/wcs/stores/servlet/Search?storeId=10051&catalogId=10901&langId=-1&departmentCategoryId=10000&search-field='
AT_URL = 'https://www.anntaylor.com/search/searchResults.jsp?question='
app = Flask(__name__)
def get_product(html_doc):
soup = BeautifulSoup(html_doc, "html.parser")
res = soup.find(id="pid-9947721")
return res
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
query = request.form['query']
res_html = requests.get(AF_URL + query)
res = get_product(res_html.text)
return render_template('index.html', res=res)
return render_template('index.html')
if __name__ == '__main__':
app.run()
|
the-stack_0_26735
|
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from pymemcache.client.base import Client
import json
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
client = Client(('127.0.0.1', 11211)) #assumes local memcached application, if needed change this to suit your installation
def misppull(dataType):
headers={'Authorization':'****INSERTYOURMISPAPIKEYHERE****','Accept':'application/json','Content-type':'application/json'}
data=json.dumps({"returnFormat":"json","type":dataType,"tags":"Feed-%","to_ids":"yes","includeEventTags":"yes","includeContext":"yes"})
response = requests.post('https://****INSERTYOURMISPADDRESSHERE****/attributes/restSearch',headers=headers,data=data,verify=False)
return response
if __name__ == '__main__':
dataTypes={'domain', 'ip-%', 'md5', 'sha1','sha256'}
for dt in dataTypes:
response = misppull(dt)
data=response.json()
if data:
for item in data["response"]["Attribute"]:
tagList=[]
for tag in item['Tag']:
for k,v in tag.items():
if(k=='name' and 'Feed-' in tag['name']):
tagList.append(str(v))
client.set(str(item['type'] + '-' + item['value']), tagList, 130)
|
the-stack_0_26736
|
"""Tests for HTMLExporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from .base import ExportersTestsBase
from ..html import HTMLExporter
from traitlets.config import Config
from nbformat import v4
class TestHTMLExporter(ExportersTestsBase):
"""Tests for HTMLExporter"""
exporter_class = HTMLExporter
should_include_raw = ['html']
def test_constructor(self):
"""
Can a HTMLExporter be constructed?
"""
HTMLExporter()
def test_export(self):
"""
Can a HTMLExporter export something?
"""
(output, resources) = HTMLExporter().from_filename(self._get_notebook())
assert len(output) > 0
def test_export_basic(self):
"""
Can a HTMLExporter export using the 'basic' template?
"""
(output, resources) = HTMLExporter(template_file='basic').from_filename(self._get_notebook())
assert len(output) > 0
def test_export_full(self):
"""
Can a HTMLExporter export using the 'full' template?
"""
(output, resources) = HTMLExporter(template_file='full').from_filename(self._get_notebook())
assert len(output) > 0
def test_prompt_number(self):
"""
Does HTMLExporter properly format input and output prompts?
"""
(output, resources) = HTMLExporter(template_file='full').from_filename(
self._get_notebook(nb_name="prompt_numbers.ipynb"))
in_regex = r"In \[(.*)\]:"
out_regex = r"Out\[(.*)\]:"
ins = ["2", "10", " ", " ", "0"]
outs = ["10"]
assert re.findall(in_regex, output) == ins
assert re.findall(out_regex, output) == outs
    def test_prompt_number_omission(self):
        """
        Does HTMLExporter omit input and output prompts when
        exclude_input_prompt and exclude_output_prompt are set?
        """
no_prompt_conf = Config(
{"TemplateExporter":{
"exclude_input_prompt": True,
"exclude_output_prompt": True,
}
}
)
exporter = HTMLExporter(config=no_prompt_conf, template_file='full')
(output, resources) = exporter.from_filename(
self._get_notebook(nb_name="prompt_numbers.ipynb"))
in_regex = r"In \[(.*)\]:"
out_regex = r"Out\[(.*)\]:"
assert not re.findall(in_regex, output)
assert not re.findall(out_regex, output)
def test_png_metadata(self):
"""
Does HTMLExporter with the 'basic' template treat pngs with width/height metadata correctly?
"""
(output, resources) = HTMLExporter(template_file='basic').from_filename(
self._get_notebook(nb_name="pngmetadata.ipynb"))
check_for_png = re.compile(r'<img src="[^"]*?"([^>]*?)>')
result = check_for_png.search(output)
attr_string = result.group(1)
assert 'width' in attr_string
assert 'height' in attr_string
def test_javascript_output(self):
nb = v4.new_notebook(
cells=[
v4.new_code_cell(
outputs=[v4.new_output(
output_type='display_data',
data={
'application/javascript': "javascript_output();"
}
)]
)
]
)
(output, resources) = HTMLExporter(template_file='basic').from_notebook_node(nb)
self.assertIn('javascript_output', output)
|
the-stack_0_26737
|
from tkinter import Tk , Entry , Button , END
master = Tk()
master.geometry("300x300")
def delete_all() :
input_entry.delete(0 , "end")
def back_space() :
a = input_entry.get()
print(a)
length = len(a)
print(length)
input_entry.delete( length-1 , END )
input_entry = Entry(master, width = 20 )
input_entry.pack(pady = 10)
delete_all_button = Button(master, text = "Delete all", command = delete_all )
delete_all_button.pack(pady = 10 )
back_space_button = Button(master, text = "Back space", command = back_space )
back_space_button.pack(pady = 10 )
master.call( "tk" , "scaling" , 5.0)
master.mainloop()
|
the-stack_0_26738
|
#!/usr/bin/env python3
import sys
import socket
from scapy.layers.dns import DNS, DNSQR
from scapy.layers.inet import IP, UDP, ICMP, IPerror
from scapy.sendrecv import sr1
from converter import Domain, Content
from packet import Packet
from utils import DNSHeaders, init_logger, get_ip_from_hostname
logger = None
class Client:
def __init__(self, domain: str, ip: str, verbosity: int = 0):
self.dns_server = ip
self.domain = domain
self.verb = verbosity
def send(self, message: str):
crafted_domain = f"{Domain.encode(message)}.{self.domain}"
packet = Packet.build_query(
{"dst": self.dns_server, "dns": {"qname": crafted_domain}}, self.domain,
)
        answer = sr1(packet.packet, verbose=self.verb, timeout=1)
        if answer is None:
            # sr1 returns None on timeout; avoid calling haslayer() on None
            return None
        if answer.haslayer(ICMP) or answer.haslayer(IPerror):
            logger.debug(answer.show())
            logger.critical("Unreachable host or filtered port")
            return None
        return answer[DNS]
def recv(self, pkt: DNS):
if pkt is not None:
packet = Packet(pkt, self.domain)
for i, (rrname, rdata) in enumerate(packet.answers):
logger.info("Message %i (%s): %s", i, rrname, rdata)
try:
logger.info("Decoded: %s", Content.decode(rdata))
except Exception:
logger.warning("Couldn't decode message")
logger.debug(packet.dns.summary())
else:
logger.warning("Packet was none, most likely timeout")
if __name__ == "__main__":
logger = init_logger()
if len(sys.argv) < 2:
logger.error("Usage: %s hostname [message]", sys.argv[0])
sys.exit(-1)
ip = get_ip_from_hostname(sys.argv[1])
if ip is None:
sys.exit(-1)
client = Client(sys.argv[1], ip)
pkt = client.send("hello world" if len(sys.argv) == 2 else sys.argv[2])
client.recv(pkt)
|
the-stack_0_26741
|
import torch
from torch import nn
import numpy as np
import os
from collections.abc import Iterable
from .utils.detect_face import detect_face, extract_face
class PNet(nn.Module):
"""MTCNN PNet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.prelu1 = nn.PReLU(10)
self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
self.prelu2 = nn.PReLU(16)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
self.prelu3 = nn.PReLU(32)
self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
self.softmax4_1 = nn.Softmax(dim=1)
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/pnet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.conv3(x)
x = self.prelu3(x)
a = self.conv4_1(x)
a = self.softmax4_1(a)
b = self.conv4_2(x)
return b, a
class RNet(nn.Module):
"""MTCNN RNet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/rnet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
"""MTCNN ONet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/onet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
class MTCNN(nn.Module):
"""MTCNN face detection module.
This class loads pretrained P-, R-, and O-nets and, given raw input images as PIL images,
returns images cropped to include the face only. Cropped faces can optionally be saved to file
also.
Keyword Arguments:
image_size {int} -- Output image size in pixels. The image will be square. (default: {160})
margin {int} -- Margin to add to bounding box, in terms of pixels in the final image.
Note that the application of the margin differs slightly from the davidsandberg/facenet
repo, which applies the margin to the original image before resizing, making the margin
dependent on the original image size (this is a bug in davidsandberg/facenet).
(default: {0})
min_face_size {int} -- Minimum face size to search for. (default: {20})
thresholds {list} -- MTCNN face detection thresholds (default: {[0.6, 0.7, 0.7]})
factor {float} -- Factor used to create a scaling pyramid of face sizes. (default: {0.709})
post_process {bool} -- Whether or not to post process images tensors before returning. (default: {True})
select_largest {bool} -- If True, if multiple faces are detected, the largest is returned.
If False, the face with the highest detection probability is returned. (default: {True})
keep_all {bool} -- If True, all detected faces are returned, in the order dictated by the
select_largest parameter. If a save_path is specified, the first face is saved to that
path and the remaining faces are saved to <save_path>1, <save_path>2 etc.
device {torch.device} -- The device on which to run neural net passes. Image tensors and
models are copied to this device before running forward passes. (default: {None})
"""
def __init__(
self, image_size=160, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
select_largest=True, keep_all=False, device=None
):
super().__init__()
self.image_size = image_size
self.margin = margin
self.min_face_size = min_face_size
self.thresholds = thresholds
self.factor = factor
self.post_process = post_process
self.select_largest = select_largest
self.keep_all = keep_all
self.pnet = PNet()
self.rnet = RNet()
self.onet = ONet()
self.device = torch.device('cpu')
if device is not None:
self.device = device
self.to(device)
def forward(self, img, save_path=None, return_prob=False):
"""Run MTCNN face detection on a PIL image. This method performs both detection and
extraction of faces, returning tensors representing detected faces rather than the bounding
boxes. To access bounding boxes, see the MTCNN.detect() method below.
Arguments:
img {PIL.Image or list} -- A PIL image or a list of PIL images.
Keyword Arguments:
save_path {str} -- An optional save path for the cropped image. Note that when
self.post_process=True, although the returned tensor is post processed, the saved face
image is not, so it is a true representation of the face in the input image.
If `img` is a list of images, `save_path` should be a list of equal length.
(default: {None})
return_prob {bool} -- Whether or not to return the detection probability.
(default: {False})
Returns:
Union[torch.Tensor, tuple(torch.tensor, float)] -- If detected, cropped image of a face
with dimensions 3 x image_size x image_size. Optionally, the probability that a
face was detected. If self.keep_all is True, n detected faces are returned in an
n x 3 x image_size x image_size tensor with an optional list of detection
probabilities. If `img` is a list of images, the item(s) returned have an extra
dimension (batch) as the first dimension.
Example:
>>> from facenet_pytorch import MTCNN
>>> mtcnn = MTCNN()
>>> face_tensor, prob = mtcnn(img, save_path='face.png', return_prob=True)
"""
# Detect faces
with torch.no_grad():
batch_boxes, batch_probs = self.detect(img)
# Determine if a batch or single image was passed
batch_mode = True
if not isinstance(img, Iterable):
img = [img]
batch_boxes = [batch_boxes]
batch_probs = [batch_probs]
batch_mode = False
# Parse save path(s)
if save_path is not None:
if isinstance(save_path, str):
save_path = [save_path]
else:
save_path = [None for _ in range(len(img))]
# Process all bounding boxes and probabilities
faces, probs = [], []
for im, box_im, prob_im, path_im in zip(img, batch_boxes, batch_probs, save_path):
if box_im is None:
faces.append(None)
probs.append([None] if self.keep_all else None)
continue
if not self.keep_all:
box_im = box_im[[0]]
faces_im = []
for i, box in enumerate(box_im):
face_path = path_im
if path_im is not None and i > 0:
save_name, ext = os.path.splitext(path_im)
face_path = save_name + '_' + str(i + 1) + ext
face = extract_face(im, box, self.image_size, self.margin, face_path)
if self.post_process:
face = fixed_image_standardization(face)
faces_im.append(face)
if self.keep_all:
faces_im = torch.stack(faces_im)
else:
faces_im = faces_im[0]
prob_im = prob_im[0]
faces.append(faces_im)
probs.append(prob_im)
if not batch_mode:
faces = faces[0]
probs = probs[0]
if return_prob:
return faces, probs
else:
return faces
def detect(self, img, landmarks=False):
"""Detect all faces in PIL image and return bounding boxes and optional facial landmarks.
This method is used by the forward method and is also useful for face detection tasks
that require lower-level handling of bounding boxes and facial landmarks (e.g., face
tracking). The functionality of the forward function can be emulated by using this method
followed by the extract_face() function.
Arguments:
img {PIL.Image or list} -- A PIL image or a list of PIL images.
Keyword Arguments:
landmarks {bool} -- Whether to return facial landmarks in addition to bounding boxes.
(default: {False})
Returns:
tuple(numpy.ndarray, list) -- For N detected faces, a tuple containing an
Nx4 array of bounding boxes and a length N list of detection probabilities.
Returned boxes will be sorted in descending order by detection probability if
self.select_largest=False, otherwise the largest face will be returned first.
If `img` is a list of images, the items returned have an extra dimension
(batch) as the first dimension. Optionally, a third item, the facial landmarks,
are returned if `landmarks=True`.
Example:
>>> from PIL import Image, ImageDraw
>>> from facenet_pytorch import MTCNN, extract_face
>>> mtcnn = MTCNN(keep_all=True)
>>> boxes, probs, points = mtcnn.detect(img, landmarks=True)
>>> # Draw boxes and save faces
>>> img_draw = img.copy()
>>> draw = ImageDraw.Draw(img_draw)
>>> for i, (box, point) in enumerate(zip(boxes, points)):
... draw.rectangle(box.tolist(), width=5)
... for p in point:
... draw.rectangle((p - 10).tolist() + (p + 10).tolist(), width=10)
... extract_face(img, box, save_path='detected_face_{}.png'.format(i))
>>> img_draw.save('annotated_faces.png')
"""
with torch.no_grad():
batch_boxes, batch_points = detect_face(
img, self.min_face_size,
self.pnet, self.rnet, self.onet,
self.thresholds, self.factor,
self.device
)
boxes, probs, points = [], [], []
for box, point in zip(batch_boxes, batch_points):
box = np.array(box)
point = np.array(point)
if len(box) == 0:
boxes.append(None)
probs.append([None])
points.append(None)
elif self.select_largest:
box_order = np.argsort((box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]))[::-1]
box = box[box_order]
point = point[box_order]
boxes.append(box[:, :4])
probs.append(box[:, 4])
points.append(point)
else:
boxes.append(box[:, :4])
probs.append(box[:, 4])
points.append(point)
boxes = np.array(boxes)
probs = np.array(probs)
points = np.array(points)
if not isinstance(img, Iterable):
boxes = boxes[0]
probs = probs[0]
points = points[0]
if landmarks:
return boxes, probs, points
return boxes, probs
def fixed_image_standardization(image_tensor):
processed_tensor = (image_tensor - 127.5) / 128.0
return processed_tensor
def prewhiten(x):
mean = x.mean()
std = x.std()
std_adj = std.clamp(min=1.0/(float(x.numel())**0.5))
y = (x - mean) / std_adj
return y
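# Hedged usage sketch (added for illustration; Pillow and a local image file
# "person.jpg" are assumptions, not requirements of this module):
#
#     from PIL import Image
#     mtcnn = MTCNN(keep_all=False)
#     img = Image.open("person.jpg")
#     face, prob = mtcnn(img, return_prob=True)   # 3 x 160 x 160 tensor, or None
#     boxes, probs = mtcnn.detect(img)            # bounding boxes + probabilities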
|
the-stack_0_26742
|
# removeLineBreaks.py
#
# Demonstration of the pyparsing module, converting text files
# with hard line-breaks to text files with line breaks only
# between paragraphs. (Helps when converting downloads from Project
# Gutenberg - https://www.gutenberg.org/ - to import to word processing apps
# that can reformat paragraphs once hard line-breaks are removed.)
#
# Uses parse actions and transformString to remove unwanted line breaks,
# and to double up line breaks between paragraphs.
#
# Copyright 2006, by Paul McGuire
#
import pyparsing as pp
line_end = pp.LineEnd()
# define an expression for the body of a line of text - use a predicate condition to
# accept only lines with some content.
def mustBeNonBlank(t):
return t[0] != ''
# could also be written as
# return bool(t[0])
lineBody = pp.SkipTo(line_end).addCondition(mustBeNonBlank, message="line body can't be empty")
# now define a line with a trailing lineEnd, to be replaced with a space character
textLine = lineBody + line_end().setParseAction(pp.replaceWith(" "))
# define a paragraph, with a separating lineEnd, to be replaced with a double newline
para = pp.OneOrMore(textLine) + line_end().setParseAction(pp.replaceWith("\n\n"))
# run a test
test = """
Now is the
time for
all
good men
to come to
the aid of their
country.
"""
print(para.transformString(test))
# process an entire file
# Project Gutenberg EBook of Successful Methods of Public Speaking, by Grenville Kleiser
# Download from http://www.gutenberg.org/cache/epub/18095/pg18095.txt
#
with open("18095-8.txt") as source_file:
original = source_file.read()
# use transformString to convert line breaks
transformed = para.transformString(original)
with open("18095-8_reformatted.txt", "w") as transformed_file:
transformed_file.write(transformed)
|
the-stack_0_26744
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial port support for Windows.
Requires PySerial and pywin32.
"""
# system imports
import serial
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
from serial import STOPBITS_ONE, STOPBITS_TWO
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
import win32file, win32event
# twisted imports
from twisted.internet import abstract
# sibling imports
from serialport import BaseSerialPort
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
"""A serial device, acting as a transport, that uses a win32 event."""
connected = 1
def __init__(self, protocol, deviceNameOrPortNumber, reactor,
baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
stopbits = STOPBITS_ONE, xonxoff = 0, rtscts = 0):
self._serial = self._serialFactory(
deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
parity=parity, stopbits=stopbits, timeout=None,
xonxoff=xonxoff, rtscts=rtscts)
self.flushInput()
self.flushOutput()
self.reactor = reactor
self.protocol = protocol
self.outQueue = []
self.closed = 0
self.closedNotifies = 0
self.writeInProgress = 0
self.protocol = protocol
self._overlappedRead = win32file.OVERLAPPED()
self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
self._overlappedWrite = win32file.OVERLAPPED()
self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
self.reactor.addEvent(self._overlappedRead.hEvent, self, 'serialReadEvent')
self.reactor.addEvent(self._overlappedWrite.hEvent, self, 'serialWriteEvent')
self.protocol.makeConnection(self)
self._finishPortSetup()
def _finishPortSetup(self):
"""
Finish setting up the serial port.
This is a separate method to facilitate testing.
"""
flags, comstat = win32file.ClearCommError(self._serial.hComPort)
rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(1),
self._overlappedRead)
def serialReadEvent(self):
#get that character we set up
n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 0)
if n:
first = str(self.read_buf[:n])
#now we should get everything that is already in the buffer
flags, comstat = win32file.ClearCommError(self._serial.hComPort)
if comstat.cbInQue:
win32event.ResetEvent(self._overlappedRead.hEvent)
rc, buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(comstat.cbInQue),
self._overlappedRead)
n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 1)
#handle all the received data:
self.protocol.dataReceived(first + str(buf[:n]))
else:
#handle all the received data:
self.protocol.dataReceived(first)
#set up next one
win32event.ResetEvent(self._overlappedRead.hEvent)
rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(1),
self._overlappedRead)
def write(self, data):
if data:
if self.writeInProgress:
self.outQueue.append(data)
else:
self.writeInProgress = 1
win32file.WriteFile(self._serial.hComPort, data, self._overlappedWrite)
def serialWriteEvent(self):
try:
dataToWrite = self.outQueue.pop(0)
except IndexError:
self.writeInProgress = 0
return
else:
win32file.WriteFile(self._serial.hComPort, dataToWrite, self._overlappedWrite)
def connectionLost(self, reason):
"""
Called when the serial port disconnects.
Will call C{connectionLost} on the protocol that is handling the
serial data.
"""
self.reactor.removeEvent(self._overlappedRead.hEvent)
self.reactor.removeEvent(self._overlappedWrite.hEvent)
abstract.FileDescriptor.connectionLost(self, reason)
self._serial.close()
self.protocol.connectionLost(reason)
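# Hedged usage sketch (added for illustration; "COM3" and the LineReceiver
# protocol are assumptions -- substitute your own port name and protocol):
#
#     from twisted.internet import reactor
#     from twisted.protocols.basic import LineReceiver
#
#     port = SerialPort(LineReceiver(), "COM3", reactor, baudrate=115200)
#     reactor.run()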
|
the-stack_0_26746
|
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.engine.url import URL
from sqlalchemy.util import immutabledict
import sqlalchemy_postgresql_audit
NAMING_CONVENTIONS = immutabledict(
{
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
"audit.table": "%(table_name)s_audr",
}
)
# Event listeners must be enabled before tables are added to the Metadata Object
sqlalchemy_postgresql_audit.enable()
meta = MetaData(naming_convention=NAMING_CONVENTIONS)
t = Table(
"foo",
meta,
Column("bar", String),
Column("baz", String),
info={
"audit.options": {
"enabled": True,
"session_settings": [
Column("username", String, nullable=False),
Column("app_uuid", UUID),
],
}
},
schema="public",
)
r = Table(
"bar",
meta,
Column("foo", String),
info={"audit.options": {"enabled": True}},
schema="public",
)
print("Tables: ", meta.tables)
url = URL(
drivername="postgresql+psycopg2",
host="localhost",
port=5432,
password="postgres",
username="postgres",
)
engine = create_engine(url)
engine.echo = True
meta.bind = engine
meta.create_all()
sqlalchemy_postgresql_audit.install_audit_triggers(meta)
|
the-stack_0_26747
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import Any, Dict, Optional
from airflow.providers.http.operators.http import SimpleHttpOperator
from airflow.providers.slack.hooks.slack_webhook import SlackWebhookHook
from airflow.utils.decorators import apply_defaults
class SlackWebhookOperator(SimpleHttpOperator):
"""
This operator allows you to post messages to Slack using incoming webhooks.
Takes both Slack webhook token directly and connection that has Slack webhook token.
If both supplied, http_conn_id will be used as base_url,
and webhook_token will be taken as endpoint, the relative path of the url.
Each Slack webhook token can be pre-configured to use a specific channel, username and
icon. You can override these defaults in this hook.
:param http_conn_id: connection that has Slack webhook token in the extra field
:type http_conn_id: str
:param webhook_token: Slack webhook token
:type webhook_token: str
:param message: The message you want to send on Slack
:type message: str
:param attachments: The attachments to send on Slack. Should be a list of
dictionaries representing Slack attachments.
:type attachments: list
:param blocks: The blocks to send on Slack. Should be a list of
dictionaries representing Slack blocks.
:type blocks: list
:param channel: The channel the message should be posted to
:type channel: str
:param username: The username to post to slack with
:type username: str
:param icon_emoji: The emoji to use as icon for the user posting to Slack
:type icon_emoji: str
:param icon_url: The icon image URL string to use in place of the default icon.
:type icon_url: str
:param link_names: Whether or not to find and link channel and usernames in your
message
:type link_names: bool
:param proxy: Proxy to use to make the Slack webhook call
:type proxy: str
"""
template_fields = [
'webhook_token',
'message',
'attachments',
'blocks',
'channel',
'username',
'proxy',
]
# pylint: disable=too-many-arguments
@apply_defaults
def __init__(
self,
*,
http_conn_id: str,
webhook_token: Optional[str] = None,
message: str = "",
attachments: Optional[list] = None,
blocks: Optional[list] = None,
channel: Optional[str] = None,
username: Optional[str] = None,
icon_emoji: Optional[str] = None,
icon_url: Optional[str] = None,
link_names: bool = False,
proxy: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(endpoint=webhook_token, **kwargs)
self.http_conn_id = http_conn_id
self.webhook_token = webhook_token
self.message = message
self.attachments = attachments
self.blocks = blocks
self.channel = channel
self.username = username
self.icon_emoji = icon_emoji
self.icon_url = icon_url
self.link_names = link_names
self.proxy = proxy
self.hook: Optional[SlackWebhookHook] = None
def execute(self, context: Dict[str, Any]) -> None:
"""Call the SlackWebhookHook to post the provided Slack message"""
self.hook = SlackWebhookHook(
self.http_conn_id,
self.webhook_token,
self.message,
self.attachments,
self.blocks,
self.channel,
self.username,
self.icon_emoji,
self.icon_url,
self.link_names,
self.proxy,
)
self.hook.execute()
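# Hedged usage sketch (added for illustration; the connection id
# "slack_default", the task_id and the surrounding DAG are assumptions):
#
#     notify = SlackWebhookOperator(
#         task_id="notify_slack",
#         http_conn_id="slack_default",
#         message="Pipeline finished",
#         channel="#alerts",
#         dag=dag,
#     )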
|
the-stack_0_26749
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
HTW-PV3 - Export weather data for POLYSUN
Export weather data for POLYSUN
SPDX-License-Identifier: AGPL-3.0-or-later
"""
__copyright__ = "© Ludwig Hülk"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://www.gnu.org/licenses/agpl-3.0.en.html"
__author__ = "Ludee;"
__version__ = "v0.0.2"
import pandas as pd
from pv3_weatherdata import calculate_diffuse_irradiation
from settings import HTW_LAT, HTW_LON
from settings import write_to_csv
import logging
log = logging.getLogger(__name__)
def export_htw_polysun(df_weather, filename, resolution, parameter_name):
"""TODO: Docu"""
# calc diffuse irradiation
df_dhi = calculate_diffuse_irradiation(df_weather, parameter_name, HTW_LAT, HTW_LON)
if resolution == 'M':
s = 60
steps = 525600
elif resolution == 'H':
s = 3600
steps = 8760
time = list(zip(range(steps), [s * i for i in range(steps)]))
polysun = {}
polysun = {'# Time [s]': dict(time)}
polysun.update(
df_weather.loc[:,
['g_hor_si', 't_luft', 'v_wind', 'h_luft']].reset_index(
drop=True).to_dict())
df_polysun = pd.DataFrame.from_dict(polysun)
df_polysun = df_polysun.merge(df_dhi['dhi'].reset_index(drop=True),
left_index=True, right_index=True)
    # set to zero, value not needed
    df_polysun['Lh [W/m²]'] = 0  # Lh long-wave radiation [Wh/m2]
# rename columns
    PRINT_NAMES = {'g_hor_si': 'Gh [W/m²]',  # Gh global irradiation [Wh/m2]
                   'dhi': 'Dh [W/m²]',  # Dh diffuse irradiation [Wh/m2]
                   't_luft': 'Tamb [°C]',  # Tamb ambient temperature [°C]
                   'v_wind': 'Vwnd [m/s]',  # Vwnd wind speed [m/s]
                   'h_luft': 'Hrel [%]',  # Hrel relative humidity [%]
                   }
df_polysun = df_polysun.rename(columns=PRINT_NAMES)
df_polysun = df_polysun.loc[:,
['# Time [s]', 'Gh [W/m²]', 'Dh [W/m²]', 'Tamb [°C]',
'Lh [W/m²]', 'Vwnd [m/s]', 'Hrel [%]']]
df_polysun = df_polysun.round(1)
write_to_csv(f'./data/{filename}', df_polysun, append=False,
index=False)
    ## 1. TODO: documentation
    polysun_first_row = '# Station: HTW Berlin, PVlib\n'
    ## 2. TODO: documentation
    polysun_second_row = f'# Latitude: {HTW_LAT:.4f} Longitude: {HTW_LON:.4f} altitude: 81m\n'
    ## 3. TODO: documentation
    polysun_third_row = '#'
with open(f'./data/{filename}', "r+") as text_file:
content = text_file.read()
text_file.seek(0, 0)
text_file.write(
polysun_first_row + polysun_second_row + polysun_third_row + '\n' + content)
log.info(f'Write data to file: {filename}')
def export_fred_polysun(df, filename, resolution):
"""converts open_FRED data into HTW_Weatherdata format"""
# dhi doesnt have to be calculated as it is already integrated
# resample
if resolution == 'M':
s = 60
steps = 525600
elif resolution == 'H':
s = 3600
steps = 8760
df = df.resample('H').mean()
# 1 additional hour found, reduce to 8760 h
df = df.loc['2015-01-01 00:00':'2015-12-31 23:00']
df['h_luft'] = 0
# rename columns
    column_names = {'ghi': 'Gh [W/m²]',  # Gh global irradiation [Wh/m2]
                    'dhi': 'Dh [W/m²]',  # Dh diffuse irradiation [Wh/m2]
                    'temp_air': 'Tamb [°C]',  # Tamb ambient temperature [°C]
                    'wind_speed': 'Vwnd [m/s]',  # Vwnd wind speed [m/s]
                    'h_luft': 'Hrel [%]',  # Hrel relative humidity [%]
                    }
    df_open_fred = df.rename(columns=column_names)
    df_open_fred['Lh [W/m²]'] = 0  # Lh long-wave radiation [Wh/m2]
fred_lat = df_open_fred['lat'][0]
fred_lon = df_open_fred['lon'][0]
time = list(zip(range(steps), [s * i for i in range(steps)]))
polysun = {'# Time [s]': dict(time)}
polysun.update(
df_open_fred.loc[:,
[ 'Gh [W/m²]', 'Dh [W/m²]', 'Tamb [°C]',
'Lh [W/m²]', 'Vwnd [m/s]', 'Hrel [%]']]
.reset_index(
drop=True).to_dict())
df_polysun = pd.DataFrame.from_dict(polysun)
write_to_csv(f'./data/{filename}', df_polysun, append=False, index=False)
    ## 1. TODO: documentation
    polysun_first_row = '# Open_FRED Wetter Stundenmittelwerte 2015\n'
    ## 2. TODO: documentation
    # TODO: altitude
    polysun_second_row = f'# Latitude: {fred_lat:.4f} Longitude: {fred_lon:.4f} altitude: 81m\n'
    ## 3. TODO: documentation
    polysun_third_row = '#'
with open(f'./data/{filename}', "r+") as text_file:
content = text_file.read()
text_file.seek(0, 0)
text_file.write(polysun_first_row + polysun_second_row + polysun_third_row + '\n' + content)
log.info(f'Write data to file: {filename}')
# deprecated
def convert_open_FRED(file_name):
"""converts open_FRED data into HTW_Weatherdata format"""
# read open_fred_weather_data
htw_weatherdata_names = {"ghi": "g_hor_si",
"wind_speed": 'v_wind',
"temp_air": 't_luft',
}
df_open_fred = pd.read_csv(file_name, index_col=0,
date_parser=pd.to_datetime)
df_open_fred = df_open_fred.resample('H').mean()
# 1 additional hour found, reduce to 8760 h
df_open_fred = df_open_fred.loc['2015-01-01 00:00':'2015-12-31 23:00']
df_open_fred['h_luft'] = 0
df_open_fred = df_open_fred.rename(columns=htw_weatherdata_names)
lat = df_open_fred['lat'][0]
lon = df_open_fred['lon'][0]
return df_open_fred, lat, lon
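# Hedged usage sketch (added for illustration; ``df_htw`` stands for a
# DataFrame with a DatetimeIndex and the columns used above
# ('g_hor_si', 't_luft', 'v_wind', 'h_luft'); the file name and the choice of
# 'g_hor_si' as parameter_name are assumptions):
#
#     export_htw_polysun(df_htw, 'htw_weather_polysun.csv',
#                        resolution='M', parameter_name='g_hor_si')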
|
the-stack_0_26750
|
from django.contrib.auth import get_user_model
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect, reverse
from django.utils import timezone
from django.views.generic import TemplateView
from .models import Article, Commentaires, Tags
from .form import CommentForm, ArticleForm
from django.contrib.auth.decorators import login_required, user_passes_test, permission_required
User = get_user_model()
@login_required
def timeline(request):
"""
    Display all blog articles; rendered with timeline/timeline.html
"""
# Article.objects.create(titre="Mon premier article", contenu_post="La dure vie d'un étudiant confiné, tome 1")
posts = Article.objects.all()
print(Tags.objects.get(text_tag='test'))
args = {'posts': posts}
return render(request, 'timeline/timeline.html', args)
@login_required
@permission_required('Article.add_article')
def add_article(request):
"""
Ajoute un nouvel article
"""
if request.method == "POST":
form = ArticleForm(request.POST, request.FILES)
if form.is_valid():
new_titre = form.cleaned_data['titre']
new_auteur = form.cleaned_data['auteur']
new_photo = form.cleaned_data['photo']
new_post = form.cleaned_data['contenu_post']
new_tags = form.cleaned_data['tags']
print(new_tags, type(new_tags))
Articl = Article.objects.create(titre=new_titre,
auteur=new_auteur,
contenu_post=new_post,
photo=new_photo)
Articl.save()
for tag in new_tags:
Articl.tags.add(tag)
Articl.save()
# Redirect back to the timeline via reverse()
return redirect(reverse('timeline-home'))
else:
print('_______________' + 'FORM NOT VALID')
return render(request, 'timeline/add.html')
else:
form = ArticleForm()
args = {'form': form}
return render(request, 'timeline/add.html', args)
@login_required
def lire(request, id):
"""
Permet de lire un post en particulier en fonction de son ID. Accès via timeline/timeline.html
"""
try:
post = Article.objects.get(id=id)
comments = Commentaires.objects.filter(id_post=id)
except Article.DoesNotExist:  # DoesNotExist lives on the model class; 'post' would be undefined here if the lookup failed
raise Http404
# Comments
form = CommentForm(request.POST)
if form.is_valid():
new_comment = form.cleaned_data['contenu_comm']
truc = request.user
com = Commentaires.objects.create(contenu_comm=new_comment, id_post=post, id_user=truc)
com.save()
comments = Commentaires.objects.filter(id_post=id)  # refresh the comment list
args = {'post': post, 'comments': comments, 'form': form}
return render(request, 'timeline/lire.html', args)
@login_required
def search_timeline(request):  # TODO: search by tags
"""
Selectionne les articles correspondant aux champs de recherche
"""
# Article.objects.create(titre="Mon premier article", contenu_post="La dure vie d'un étudiant confiné, tome 1")
posts = Article.objects.filter(id_post=id)  # placeholder query; the real (tag-based) search is still to be implemented
return render(request, 'timeline/timeline.html', {'posts': posts})
# def add_comment_to_post(request, pk):
# post = get_object_or_404(Article, pk=pk)
# if request.method == "POST":
# form = CommentForm(request.POST)
# if form.is_valid():
# comment = form.save(commit=False)
# comment.post = post
# comment.save()
# return redirect('post_detail', pk=post.pk)
# else:
# form = CommentForm()
# return render(request, 'timeline/add_comment_to_post.html', {'form': form})
# class LireView(TemplateView):
# template_name = "timeline/lire.html"
#
# def get(self, request):
# form = CommentForm()
# return render(request, self.template_name, {'form': form})
#
# def post(self, request):
# form = CommentForm(request.POST)
# if form.is_valid():
# your_comment = form.cleaned_data['your_comment']
# form = CommentForm()
#
#
# args = {'form': form, 'your_comment': your_comment}
# return render(request, self.template_name, args)
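# URL-wiring sketch (added for illustration; not part of the original file). A urls.py
# for this app could map the views above roughly as follows; only 'timeline-home' is
# confirmed by the reverse() call above, the other paths and names are guesses.
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.timeline, name='timeline-home'),
#     path('add/', views.add_article, name='timeline-add'),
#     path('lire/<int:id>/', views.lire, name='timeline-lire'),
#     path('search/', views.search_timeline, name='timeline-search'),
# ]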
|
the-stack_0_26751
|
# Author: Guilherme Aldeia
# Contact: [email protected]
# Version: 1.0.0
# Last modified: 10-16-2021 by Guilherme Aldeia
"""Auxiliary methods to deal with the results files and parallel workers.
The experiments to fit and explain regression methods with explanatory
methods are implemented in _experiments, with methods to perform and save each
type of explanation.
However, properly executing all experiments in parallel processes (without
repetition) and managing the result files needs to be as simple as
possible.
This script implements methods for controlling the parallel processes by
setting up all the structure needed to save the results, creating a progress
tracking file and providing easy verification of finished experiments.
Also, the experiments are wrapped in a worker function to make possible to
run experiments in parallel.
"""
import os
import pandas as pd
from datetime import datetime
from filelock import FileLock
from _experiments import (run_or_retrieve_gridsearch,
regressor_experiment, groundtruth_experiment)
def setup_environment(results_path):
"""Creates the folder structure to save the results, using as root
the given results_path. Inside the root, the tracking file and the
parallel write lock of the results files will be created."""
print("Now creating all folders and files needed to control the "
"execution of the experiments...")
if not os.path.exists(results_path):
os.makedirs(results_path)
subfolders = [
'1.gridsearch',
'2.regression',
'3.explanation/3.1.local/3.1.1.traindata',
'3.explanation/3.1.local/3.1.2.testdata',
'3.explanation/3.2.global/3.2.1.traindata',
'3.explanation/3.2.global/3.2.2.testdata',
]
for subfolder in subfolders:
if not os.path.exists(f"{results_path}/{subfolder}"):
os.makedirs(f"{results_path}/{subfolder}")
# Creating lock in a path that is known to all subprocesses
open(f'{results_path}/_experiments_lock.lock', 'w+')
# Columns of the tracking file
columns = ['dataset', 'regressor_name', 'rep_number', 'end_time', 'finished']
tracking_file = f'{results_path}/_experiments_finished_executions.csv'
tracking_df = pd.DataFrame(columns=columns)
if os.path.isfile(tracking_file):
tracking_df = pd.read_csv(tracking_file)
else:
# create it in case it does not exist
tracking_df.to_csv(tracking_file, index=False)
_clean_unfinished_reports(results_path)
def _report_started_experiment(ds_name, regressor_name, rep_number, results_path):
"""Method that takes as argument the data set name, regressor, repetition
number and the path where the results are and updates the tracking file
to inform that the experiment with the given configurations has started.
"""
columns = ['dataset', 'regressor_name', 'rep_number', 'end_time', 'finished']
tracking_file = f'{results_path}/_experiments_finished_executions.csv'
tracking_df = pd.DataFrame(columns=columns)
with FileLock(f'{results_path}/_experiments_lock.lock'):
if os.path.isfile(tracking_file):
tracking_df = pd.read_csv(tracking_file)
else:
tracking_df.to_csv(tracking_file, index=False)
new_entry = pd.Series({
'dataset': ds_name,
'regressor_name' : regressor_name,
'end_time' : "not finished",
'rep_number': rep_number,
'finished' : False
})
tracking_df = tracking_df.append(new_entry, ignore_index=True)
tracking_df = tracking_df.sort_values('finished', ascending=False)
tracking_df.to_csv(tracking_file, index=False)
def _report_finished_experiment(
ds_name, regressor_name, rep_number, results_path):
"""Method that takes as argument the data set name, regressor, repetition
number and the path where the results are and updates the tracking file
to inform that the experiment with the given configurations is now finished.
"""
columns = ['dataset', 'regressor_name', 'rep_number', 'end_time', 'finished']
tracking_file = f'{results_path}/_experiments_finished_executions.csv'
tracking_df = pd.DataFrame(columns=columns)
with FileLock(f'{results_path}/_experiments_lock.lock'):
if os.path.isfile(tracking_file):
tracking_df = pd.read_csv(tracking_file)
else:
tracking_df.to_csv(tracking_file, index=False)
# Dropping the previous information about the experiment
# (this should exist, since we only report a finished experiment if
# it has been started)
tracking_df = tracking_df.drop(tracking_df[
(tracking_df['dataset']==ds_name) &
(tracking_df['regressor_name']==regressor_name) &
(tracking_df['rep_number']==rep_number) &
(tracking_df['finished']==False)].index)
new_entry = pd.Series({
'dataset': ds_name,
'regressor_name' : regressor_name,
'end_time' : datetime.now().strftime("%d/%m/%Y-%H:%M:%S"),
'rep_number': rep_number,
'finished' : True
})
tracking_df = tracking_df.append(new_entry, ignore_index=True)
tracking_df = tracking_df.sort_values('finished', ascending=False)
tracking_df.to_csv(tracking_file, index=False)
def _is_finished_experiment(ds_name, regressor_name, rep_number, results_path):
"""Method that takes as argument the data set name, regressor, repetition
number and the path where the results are and checks if the experiment
with the given configurations is already finished.
"""
tracking_file = f'{results_path}/_experiments_finished_executions.csv'
with FileLock(f'{results_path}/_experiments_lock.lock'):
if os.path.isfile(tracking_file):
tracking_df = pd.read_csv(tracking_file)
return len(tracking_df[
(tracking_df['dataset']==ds_name) &
(tracking_df['regressor_name']==regressor_name) &
(tracking_df['rep_number']==rep_number) &
(tracking_df['finished']==True)])>=1
else:
return False
def _clean_unfinished_reports(results_path):
"""Abrupt interruptions of the experiment script can leave unfinished
experiments in the tracking file. This method will clean them up.
"""
columns = ['dataset', 'regressor_name', 'rep_number', 'end_time', 'finished']
tracking_file = f'{results_path}/_experiments_finished_executions.csv'
tracking_df = pd.DataFrame(columns=columns)
with FileLock(f'{results_path}/_experiments_lock.lock'):
if os.path.isfile(tracking_file):
tracking_df = pd.read_csv(tracking_file)
else:
tracking_df.to_csv(tracking_file, index=False)
tracking_df = tracking_df.drop(tracking_df[
tracking_df['finished']==False].index)
tracking_df.to_csv(tracking_file, index=False)
def worker_gridsearch(ds_name, regressor_class, results_path, datasets_path):
"""Worker to perform the gridsearch in parallel processes using the
'processing' module. This worker takes as argument the data set and
regressor to be optimized, where the results should be saved, and
where to find the feynman data sets.
"""
# Gridsearch results are simple enough to be checked within the
# gridsearch method. There is no verification here.
# However, the 'processing' package does not support a parallel map
# with named arguments, so this worker provides this simplification.
run_or_retrieve_gridsearch(
ds_name = ds_name,
regressor_class = regressor_class,
results_path = results_path,
datasets_path = datasets_path
)
return
def worker_experiment(ds_name, regressor_class, explainer_classes, rep_number,
results_path, datasets_path, n_local_explanations, metrics_factor):
"""Worker to perform one experiment in parallel processes using the
'processing' module. This worker takes as argument the data set, the
regressor to be fitted, a list of explainers to be used in the
experiment, the number of this repetition of experiments, where the results
should be saved, where to find the feynman data sets, the number of
local explanations (max=100) to perform, and the neighborhood size factor.
"""
# If already finished, skip
if _is_finished_experiment(
ds_name, regressor_class.__name__, rep_number, results_path):
return
# Reporting that this experiment has started
_report_started_experiment(
ds_name, regressor_class.__name__, rep_number, results_path)
# Performing the experiment
regressor_experiment(
ds_name = ds_name,
regressor_class = regressor_class,
explainer_classes = explainer_classes,
rep_number = rep_number,
n_local_explanations = n_local_explanations,
metrics_factor = metrics_factor,
results_path = results_path,
datasets_path = datasets_path
)
# Updating the status of this experiment
_report_finished_experiment(
ds_name, regressor_class.__name__, rep_number, results_path)
return
def worker_groundtruth(ds_name, feynman_regressor, explainer_classes, rep_number,
results_path, datasets_path, n_local_explanations, metrics_factor):
"""Worker to perform one ground-truth experiment in parallel processes
using the 'processing' module. This worker takes as argument the data set,
the feynman regressor class, a list of explainers to be used in the
experiment, the number of this repetition of experiments, where the results
should be saved, where to find the feynman data sets, the number of
local explanations (max=100) to perform, and the neighborhood size factor.
"""
if _is_finished_experiment(
ds_name, feynman_regressor.__name__, rep_number, results_path):
return
_report_started_experiment(
ds_name, feynman_regressor.__name__, rep_number, results_path)
groundtruth_experiment(
ds_name = ds_name,
feynman_regressor = feynman_regressor,
explainer_classes = explainer_classes,
rep_number = rep_number,
n_local_explanations = n_local_explanations,
metrics_factor = metrics_factor,
results_path = results_path,
datasets_path = datasets_path,
)
_report_finished_experiment(
ds_name, feynman_regressor.__name__, rep_number, results_path)
return
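# Dispatch sketch (added for illustration; not part of the original module). The module
# docstring mentions a parallel map from the 'processing' package; the equivalent with
# the standard multiprocessing library would look roughly like this. The dataset name,
# regressor class and explainer list below are placeholders.
# from multiprocessing import Pool
# from itertools import product
#
# configurations = [
#     (ds_name, SomeRegressor, [SomeExplainer], rep, './results', './datasets', 100, 0.5)
#     for ds_name, rep in product(['feynman_I_6_2a'], range(30))
# ]
# setup_environment('./results')
# with Pool(processes=4) as pool:
#     pool.starmap(worker_experiment, configurations)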
|
the-stack_0_26753
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: RainbowSecret
## Microsoft Research
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import pdb
import cv2
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from lib.models.backbones.backbone_selector import BackboneSelector
from lib.models.tools.module_helper import ModuleHelper
from lib.utils.helpers.offset_helper import DTOffsetConfig
from lib.models.backbones.hrnet.hrnet_backbone import BasicBlock
class SegFix_HRNet(nn.Module):
def __init__(self, configer):
super(SegFix_HRNet, self).__init__()
self.configer = configer
self.backbone = BackboneSelector(configer).get_backbone()
backbone_name = self.configer.get('network', 'backbone')
width = int(backbone_name[-2:])
if 'hrnet' in backbone_name:
in_channels = width * 15
else:
in_channels = width * 31
num_masks = 2
num_directions = DTOffsetConfig.num_classes
mid_channels = 256
self.dir_head = nn.Sequential(
nn.Conv2d(in_channels,
mid_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False),
ModuleHelper.BNReLU(mid_channels,
bn_type=self.configer.get(
'network', 'bn_type')),
nn.Conv2d(mid_channels,
num_directions,
kernel_size=1,
stride=1,
padding=0,
bias=False))
self.mask_head = nn.Sequential(
nn.Conv2d(in_channels,
mid_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False),
ModuleHelper.BNReLU(mid_channels,
bn_type=self.configer.get(
'network', 'bn_type')),
nn.Conv2d(mid_channels,
num_masks,
kernel_size=1,
stride=1,
padding=0,
bias=False))
def forward(self, x_):
x = self.backbone(x_)
_, _, h, w = x[0].size()
feat1 = x[0]
for i in range(1, len(x)):
x[i] = F.interpolate(x[i],
size=(h, w),
mode='bilinear',
align_corners=True)
feats = torch.cat(x, 1)
mask_map = self.mask_head(feats)
dir_map = self.dir_head(feats)
return mask_map, dir_map
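# Shape note (added for illustration; not part of the original file): with an HRNet
# backbone the concatenated features sit at roughly 1/4 of the input resolution, so for
# an input of shape (N, 3, H, W) mask_map is (N, 2, H/4, W/4) and dir_map is
# (N, num_directions, H/4, W/4).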
|
the-stack_0_26754
|
import tensorflow as tf
def initial(cell_number,n_actions,n_state,learning_rate,model_i,round_size):
tf.reset_default_graph()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
input = tf.placeholder(tf.float32, [None, round_size, n_state], name="input_x")  # shape: [batch, round_size, n_state]
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=cell_number, state_is_tuple=True)
_, final_state = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=input, dtype=tf.float32)
W3 = tf.get_variable("W3", shape=[cell_number, n_actions],
initializer=tf.contrib.layers.xavier_initializer())
B3 = tf.get_variable("B3", shape=[1, n_actions],
initializer=tf.constant_initializer())
score = tf.matmul(final_state[1], W3) + B3
probability = tf.nn.softmax(score)
r = tf.placeholder(tf.float32, [None, n_actions], name="input_r")
constant = tf.placeholder(tf.float32, name="Constant")
same_state_sum = -tf.reduce_sum(r * constant * probability, axis=1)
loss = tf.reduce_sum(same_state_sum, axis=0)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
saver.save(sess, './model_' + str(model_i) + '/initial.ckpt')
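# Usage sketch (added for illustration; not part of the original script). Builds the
# LSTM policy graph and writes the initial checkpoint to ./model_0/initial.ckpt; the
# hyperparameter values are examples only. Requires TensorFlow 1.x
# (tf.placeholder / tf.Session / tf.contrib).
if __name__ == '__main__':
    import os
    os.makedirs('./model_0', exist_ok=True)
    initial(cell_number=64, n_actions=4, n_state=30,
            learning_rate=1e-3, model_i=0, round_size=30)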
|
the-stack_0_26755
|
import collections
from celery.result import AsyncResult
from flask import Flask, redirect, render_template, request
from flask.json import jsonify
from tasks import make_celery
import glob
import itertools
import json
import os
import re
import sys
import subprocess
import time
app = Flask(__name__)
app.config.from_object('config')
celery = make_celery(app)
@celery.task(bind=True)
def generate(self, text="", sharpen_preset="Off", width=832, height=512, steps=250, out_name=None, init_image=""):
kwargs = {
'text': text,
'root_path': 'out_diffusion',
'setup': False,
'out_name': out_name or str(int(time.time())),
'sharpen_preset': sharpen_preset,
'width': int(width),
'height': int(height),
'init_image': init_image,
'steps': int(steps),
'skip_steps': 0,
'inter_saves': 3
}
sys.path.append(".")
from diffuse import main
from argparse import Namespace
main(argparse_args=Namespace(**kwargs), task=self)
@app.route("/tasks", methods=["POST"])
def run_task():
task = generate.delay(**request.form)
return redirect(f'/?task_id={task.id}')
@app.route("/tasks/<task_id>")
def get_status(task_id):
task_result = AsyncResult(task_id)
result = {
"task_id": task_id,
"task_status": task_result.status,
"task_result": task_result.result,
}
return jsonify(result)
def read_settings(fname):
with open(fname) as f:
settings = json.load(f)
return settings.get("text_prompts", {}).get('0', [])
def get_gpus():
p = subprocess.run("nvidia-smi --query-gpu=name,uuid --format=csv,noheader", capture_output=True, shell=True)
rows = filter(None, p.stdout.decode("utf-8").split("\n"))
rows = [row.split(", ") for row in rows]
return [
{"name": row[0], "uuid": row[1]}
for row in rows
]
@app.route("/")
def get_generated():
desired_prompt = request.args.get("prompt", "")
data_root = os.environ.get("DATA_ROOT", ".")
srcs = [
src[len(data_root):]
for src in glob.glob(os.path.join(data_root, "out_diffusion/images_out/*/*.png"))
]
srcs.reverse()
thumbs = [src.replace(".png", ".png!md") for src in srcs]
ids = [re.search(r"out_diffusion/images_out/([^/]+)/.+\.png$", src).group(1) for src in srcs]
prompts = [
str(read_settings(fname))
for fname in glob.glob(os.path.join(os.environ.get("DATA_ROOT", "./"), "out_diffusion/images_out/*/*_settings.txt"), recursive=True)
]
prompts.reverse()
prompt_attempts = collections.defaultdict(list)
for prompt, img_id, thumb in zip(prompts, ids, thumbs):
prompt_attempts[prompt].append(f"<a href=\"#{img_id}\"><img src=\"{thumb}\" /></a>")
form = render_template("form.html")
poll = render_template("poll.html")
index = "<ul>" + "\n".join([f"<li>{prompt}<br/>{''.join(links)}</li>" for prompt, links in prompt_attempts.items()]) + "</ul>"
images = [
f"<figure id=\"{img_id}\"><img src=\"{src}\" alt=\"{prompt}\"/><figcaption>{prompt}</figcaption></figure>"
for img_id, src, prompt in zip(ids, srcs, prompts)
]
return poll + form + index + (f"{desired_prompt}" if desired_prompt else "") + "\n".join(images)
@app.route('/info')
def info():
resp = {
'connecting_ip': request.headers['X-Real-IP'],
'proxy_ip': request.headers['X-Forwarded-For'],
'host': request.headers['Host'],
'user-agent': request.headers['User-Agent']
}
return jsonify(resp)
@app.route('/flask-health-check')
def flask_health_check():
return "success"
|
the-stack_0_26756
|
import os
def printFenGeFu(msg: str):  # "FenGeFu" means separator: prints a divider line around the message
print("*"*10,msg,"*"*10)
printFenGeFu(" create dir ")
os.mkdir("./testMkdir")
printFenGeFu(" delete dir ")
os.rmdir("./testMkdir")
printFenGeFu(" file text write ")
content=''' i am a programer and lucky dog
do you love me?
'''
file=open(r"./text.txt",'w')
file.write(content)
file.close()
printFenGeFu(" file text read ")
fr=open(r"./text.txt")
while True:
line=fr.readline()
if len(line)==0:
break
print(line)
fr.close()
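# Idiomatic alternative (added for illustration; not part of the original script): a
# `with` statement closes the file automatically and iterating the handle yields lines
# directly, so no manual readline()/close() is needed.
printFenGeFu(" file text read (with statement) ")
with open(r"./text.txt") as fr2:
    for line in fr2:
        print(line, end='')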
|
the-stack_0_26757
|
import mmcv
from mmcv.image import tensor2imgs
from mmdet.core import bbox_mapping
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class RPN(BaseDetector):
"""Implementation of Region Proposal Network."""
def __init__(self,
backbone,
neck,
rpn_head,
train_cfg,
test_cfg,
pretrained=None):
super(RPN, self).__init__()
self.backbone = build_backbone(backbone)
self.neck = build_neck(neck) if neck is not None else None
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(RPN, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
self.neck.init_weights()
self.rpn_head.init_weights()
def extract_feat(self, img):
"""Extract features.
Args:
img (torch.Tensor): Image tensor with shape (n, c, h ,w).
Returns:
list[torch.Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Dummy forward function."""
x = self.extract_feat(img)
rpn_outs = self.rpn_head(x)
return rpn_outs
def forward_train(self,
img,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if (isinstance(self.train_cfg.rpn, dict)
and self.train_cfg.rpn.get('debug', False)):
self.rpn_head.debug_imgs = tensor2imgs(img)
x = self.extract_feat(img)
losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
x = self.extract_feat(img)
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
if rescale:
for proposals, meta in zip(proposal_list, img_metas):
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
return [proposal.cpu().numpy() for proposal in proposal_list]
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
proposal_list = self.rpn_head.aug_test_rpn(
self.extract_feats(imgs), img_metas)
if not rescale:
for proposals, img_meta in zip(proposal_list, img_metas[0]):
img_shape = img_meta['img_shape']
scale_factor = img_meta['scale_factor']
flip = img_meta['flip']
flip_direction = img_meta['flip_direction']
proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
scale_factor, flip,
flip_direction)
return [proposal.cpu().numpy() for proposal in proposal_list]
def show_result(self, data, result, top_k=20, **kwargs):
"""Show RPN proposals on the image.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
top_k (int): Plot the first k bboxes only
if set positive. Default: 20
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
mmcv.imshow_bboxes(data, result, top_k=top_k)
|
the-stack_0_26759
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='family',
parent_name='choropleth.colorbar.titlefont',
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='colorbars',
no_blank=True,
role='style',
strict=True,
**kwargs
)
|
the-stack_0_26760
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
import uuid
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from webob.multidict import MultiDict, NoVars
from warehouse.admin.views import users as views
from warehouse.packaging.models import Project
from ....common.db.accounts import User, UserFactory, EmailFactory
from ....common.db.packaging import JournalEntryFactory, ProjectFactory, RoleFactory
class TestUserList:
def test_no_query(self, db_request):
users = sorted(
[UserFactory.create() for _ in range(30)], key=lambda u: u.username.lower()
)
result = views.user_list(db_request)
assert result == {"users": users[:25], "query": None}
def test_with_page(self, db_request):
users = sorted(
[UserFactory.create() for _ in range(30)], key=lambda u: u.username.lower()
)
db_request.GET["page"] = "2"
result = views.user_list(db_request)
assert result == {"users": users[25:], "query": None}
def test_with_invalid_page(self):
request = pretend.stub(params={"page": "not an integer"})
with pytest.raises(HTTPBadRequest):
views.user_list(request)
def test_basic_query(self, db_request):
users = sorted(
[UserFactory.create() for _ in range(5)], key=lambda u: u.username.lower()
)
db_request.GET["q"] = users[0].username
result = views.user_list(db_request)
assert result == {"users": [users[0]], "query": users[0].username}
def test_wildcard_query(self, db_request):
users = sorted(
[UserFactory.create() for _ in range(5)], key=lambda u: u.username.lower()
)
db_request.GET["q"] = users[0].username[:-1] + "%"
result = views.user_list(db_request)
assert result == {"users": [users[0]], "query": users[0].username[:-1] + "%"}
def test_email_query(self, db_request):
users = sorted(
[UserFactory.create() for _ in range(5)], key=lambda u: u.username.lower()
)
emails = [EmailFactory.create(user=u, primary=True) for u in users]
db_request.GET["q"] = "email:" + emails[0].email
result = views.user_list(db_request)
assert result == {"users": [users[0]], "query": "email:" + emails[0].email}
def test_or_query(self, db_request):
users = sorted(
[UserFactory.create() for _ in range(5)], key=lambda u: u.username.lower()
)
emails = [EmailFactory.create(user=u, primary=True) for u in users]
db_request.GET["q"] = " ".join(
[
users[0].username,
users[1].username[:-1] + "%",
"email:" + emails[2].email,
"email:" + emails[3].email[:-5] + "%",
]
)
result = views.user_list(db_request)
assert result == {"users": users[:4], "query": db_request.GET["q"]}
def test_ignores_invalid_query(self, db_request):
users = sorted(
[UserFactory.create() for _ in range(5)], key=lambda u: u.username.lower()
)
db_request.GET["q"] = "foobar:what"
result = views.user_list(db_request)
assert result == {"users": users, "query": "foobar:what"}
class TestUserDetail:
def test_404s_on_nonexistant_user(self, db_request):
user = UserFactory.create()
user_id = uuid.uuid4()
while user.id == user_id:
user_id = uuid.uuid4()
db_request.matchdict["user_id"] = str(user_id)
with pytest.raises(HTTPNotFound):
views.user_detail(db_request)
def test_gets_user(self, db_request):
email = EmailFactory.create(primary=True)
user = UserFactory.create(emails=[email])
project = ProjectFactory.create()
roles = sorted([RoleFactory(project=project, user=user, role_name="Owner")])
db_request.matchdict["user_id"] = str(user.id)
db_request.POST = NoVars()
result = views.user_detail(db_request)
assert result["user"] == user
assert result["roles"] == roles
assert result["form"].emails[0].primary.data
def test_updates_user(self, db_request):
user = UserFactory.create()
db_request.matchdict["user_id"] = str(user.id)
db_request.method = "POST"
db_request.POST["name"] = "Jane Doe"
db_request.POST = MultiDict(db_request.POST)
db_request.current_route_path = pretend.call_recorder(
lambda: "/admin/users/{}/".format(user.id)
)
resp = views.user_detail(db_request)
assert resp.status_code == 303
assert resp.location == "/admin/users/{}/".format(user.id)
assert user.name == "Jane Doe"
class TestUserDelete:
def test_deletes_user(self, db_request, monkeypatch):
user = UserFactory.create()
project = ProjectFactory.create()
another_project = ProjectFactory.create()
journal = JournalEntryFactory(submitted_by=user)
RoleFactory(project=project, user=user, role_name="Owner")
deleted_user = UserFactory.create(username="deleted-user")
db_request.matchdict["user_id"] = str(user.id)
db_request.params = {"username": user.username}
db_request.route_path = pretend.call_recorder(lambda a: "/foobar")
db_request.user = UserFactory.create()
db_request.remote_addr = "10.10.10.10"
result = views.user_delete(db_request)
db_request.db.flush()
assert not db_request.db.query(User).get(user.id)
assert db_request.db.query(Project).all() == [another_project]
assert db_request.route_path.calls == [pretend.call("admin.user.list")]
assert result.status_code == 303
assert result.location == "/foobar"
assert journal.submitted_by == deleted_user
def test_deletes_user_bad_confirm(self, db_request, monkeypatch):
user = UserFactory.create()
project = ProjectFactory.create()
RoleFactory(project=project, user=user, role_name="Owner")
db_request.matchdict["user_id"] = str(user.id)
db_request.params = {"username": "wrong"}
db_request.route_path = pretend.call_recorder(lambda a, **k: "/foobar")
result = views.user_delete(db_request)
db_request.db.flush()
assert db_request.db.query(User).get(user.id)
assert db_request.db.query(Project).all() == [project]
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", user_id=user.id)
]
assert result.status_code == 303
assert result.location == "/foobar"
|
the-stack_0_26761
|
"""Test PyPlumIO lambda structure."""
from pyplumio.structures.lambda_ import (
LAMBDA_LEVEL,
LAMBDA_STATUS,
LAMBDA_TARGET,
from_bytes,
)
_empty = bytearray([0xFF])
_message = bytearray([0x1, 0x2, 0x28, 0x0])
_data = {LAMBDA_STATUS: 1, LAMBDA_TARGET: 2, LAMBDA_LEVEL: 40}
def test_from_bytes_empty():
"""Test conversion from bytes with empty data."""
data, offset = from_bytes(_empty)
assert data == {}
assert offset == 1
def test_from_bytes():
"""Test conversion from bytes."""
data, offset = from_bytes(_message)
assert data == _data
assert offset == 4
|
the-stack_0_26763
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.errorreporting_v1beta1.services.error_stats_service import pagers
from google.cloud.errorreporting_v1beta1.types import common
from google.cloud.errorreporting_v1beta1.types import error_stats_service
from .transports.base import ErrorStatsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ErrorStatsServiceGrpcAsyncIOTransport
from .client import ErrorStatsServiceClient
class ErrorStatsServiceAsyncClient:
"""An API for retrieving and managing error statistics as well
as data for individual events.
"""
_client: ErrorStatsServiceClient
DEFAULT_ENDPOINT = ErrorStatsServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ErrorStatsServiceClient.DEFAULT_MTLS_ENDPOINT
error_group_path = staticmethod(ErrorStatsServiceClient.error_group_path)
parse_error_group_path = staticmethod(
ErrorStatsServiceClient.parse_error_group_path
)
common_billing_account_path = staticmethod(
ErrorStatsServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
ErrorStatsServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(ErrorStatsServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
ErrorStatsServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
ErrorStatsServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
ErrorStatsServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(ErrorStatsServiceClient.common_project_path)
parse_common_project_path = staticmethod(
ErrorStatsServiceClient.parse_common_project_path
)
common_location_path = staticmethod(ErrorStatsServiceClient.common_location_path)
parse_common_location_path = staticmethod(
ErrorStatsServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ErrorStatsServiceAsyncClient: The constructed client.
"""
return ErrorStatsServiceClient.from_service_account_info.__func__(ErrorStatsServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ErrorStatsServiceAsyncClient: The constructed client.
"""
return ErrorStatsServiceClient.from_service_account_file.__func__(ErrorStatsServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return ErrorStatsServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> ErrorStatsServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ErrorStatsServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(ErrorStatsServiceClient).get_transport_class, type(ErrorStatsServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ErrorStatsServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the error stats service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ErrorStatsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ErrorStatsServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_group_stats(
self,
request: Union[error_stats_service.ListGroupStatsRequest, dict] = None,
*,
project_name: str = None,
time_range: error_stats_service.QueryTimeRange = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListGroupStatsAsyncPager:
r"""Lists the specified groups.
.. code-block::
from google.cloud import errorreporting_v1beta1
def sample_list_group_stats():
# Create a client
client = errorreporting_v1beta1.ErrorStatsServiceClient()
# Initialize request argument(s)
request = errorreporting_v1beta1.ListGroupStatsRequest(
project_name="project_name_value",
)
# Make the request
page_result = client.list_group_stats(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.errorreporting_v1beta1.types.ListGroupStatsRequest, dict]):
The request object. Specifies a set of `ErrorGroupStats`
to return.
project_name (:class:`str`):
Required. The resource name of the Google Cloud Platform
project. Written as ``projects/{projectID}`` or
``projects/{projectNumber}``, where ``{projectID}`` and
``{projectNumber}`` can be found in the `Google Cloud
Console <https://support.google.com/cloud/answer/6158840>`__.
Examples: ``projects/my-project-123``,
``projects/5551234``.
This corresponds to the ``project_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
time_range (:class:`google.cloud.errorreporting_v1beta1.types.QueryTimeRange`):
Optional. List data for the given time range. If not
set, a default time range is used. The field
time_range_begin in the response will specify the
beginning of this time range. Only ErrorGroupStats with
a non-zero count in the given time range are returned,
unless the request contains an explicit group_id list.
If a group_id list is given, also ErrorGroupStats with
zero occurrences are returned.
This corresponds to the ``time_range`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.errorreporting_v1beta1.services.error_stats_service.pagers.ListGroupStatsAsyncPager:
Contains a set of requested error
group stats.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_name, time_range])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = error_stats_service.ListGroupStatsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_name is not None:
request.project_name = project_name
if time_range is not None:
request.time_range = time_range
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_group_stats,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_name", request.project_name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListGroupStatsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def list_events(
self,
request: Union[error_stats_service.ListEventsRequest, dict] = None,
*,
project_name: str = None,
group_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEventsAsyncPager:
r"""Lists the specified events.
.. code-block::
from google.cloud import errorreporting_v1beta1
def sample_list_events():
# Create a client
client = errorreporting_v1beta1.ErrorStatsServiceClient()
# Initialize request argument(s)
request = errorreporting_v1beta1.ListEventsRequest(
project_name="project_name_value",
group_id="group_id_value",
)
# Make the request
page_result = client.list_events(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.errorreporting_v1beta1.types.ListEventsRequest, dict]):
The request object. Specifies a set of error events to
return.
project_name (:class:`str`):
Required. The resource name of the Google Cloud Platform
project. Written as ``projects/{projectID}``, where
``{projectID}`` is the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__.
Example: ``projects/my-project-123``.
This corresponds to the ``project_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
group_id (:class:`str`):
Required. The group for which events
shall be returned.
This corresponds to the ``group_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.errorreporting_v1beta1.services.error_stats_service.pagers.ListEventsAsyncPager:
Contains a set of requested error
events.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_name, group_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = error_stats_service.ListEventsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_name is not None:
request.project_name = project_name
if group_id is not None:
request.group_id = group_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_events,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_name", request.project_name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEventsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def delete_events(
self,
request: Union[error_stats_service.DeleteEventsRequest, dict] = None,
*,
project_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> error_stats_service.DeleteEventsResponse:
r"""Deletes all error events of a given project.
.. code-block::
from google.cloud import errorreporting_v1beta1
def sample_delete_events():
# Create a client
client = errorreporting_v1beta1.ErrorStatsServiceClient()
# Initialize request argument(s)
request = errorreporting_v1beta1.DeleteEventsRequest(
project_name="project_name_value",
)
# Make the request
response = client.delete_events(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.errorreporting_v1beta1.types.DeleteEventsRequest, dict]):
The request object. Deletes all events in the project.
project_name (:class:`str`):
Required. The resource name of the Google Cloud Platform
project. Written as ``projects/{projectID}``, where
``{projectID}`` is the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__.
Example: ``projects/my-project-123``.
This corresponds to the ``project_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse:
Response message for deleting error
events.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = error_stats_service.DeleteEventsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_name is not None:
request.project_name = project_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_events,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_name", request.project_name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-errorreporting",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ErrorStatsServiceAsyncClient",)
|
the-stack_0_26766
|
import logging
import os
import platform
import time
from enum import Enum
from typing import NoReturn, Union, Optional
import selenium.webdriver.chrome.options as c_op
import selenium.webdriver.chrome.webdriver as c_wd
import selenium.webdriver.firefox.options as f_op
import selenium.webdriver.firefox.webdriver as f_wd
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from SessionHandler.SessionObject import SessionObject, IndexedDB
class Browser(Enum):
CHROME = 'chrome'
FIREFOX = 'firefox'
class SessionHandler:
__browser_choice = 0
__browser_options: Union[c_op.Options, f_op.Options]
__browser_profile_list: 'list[str]'
__browser_user_dir: str
__custom_driver = False
__driver: Union[c_wd.WebDriver, f_wd.WebDriver] = None
__log: logging.Logger
__session: SessionObject
def __refresh_profile_list(self) -> NoReturn:
if not self.__custom_driver:
if os.path.isdir(self.__browser_user_dir):
self.__log.debug('Getting browser profiles...')
if self.__browser_choice == Browser.CHROME:
self.__browser_profile_list = ['']
for profile_dir in os.listdir(self.__browser_user_dir):
if 'profile' in profile_dir.lower():
if profile_dir != 'System Profile':
self.__browser_profile_list.append(profile_dir)
elif self.__browser_choice == Browser.FIREFOX:
# TODO: consider reading out the profiles.ini
self.__browser_profile_list = []
for profile_dir in os.listdir(self.__browser_user_dir):
if not profile_dir.endswith('.default'):
if os.path.isdir(os.path.join(self.__browser_user_dir, profile_dir)):
self.__browser_profile_list.append(profile_dir)
self.__log.debug('Browser profiles registered.')
else:
self.__log.error('Browser user dir does not exist.')
self.__browser_profile_list = []
else:
# TODO: Figure out why I did that before
raise AssertionError('Do not call this method while using a custom driver.')
def __init_browser(self) -> NoReturn:
self.__custom_driver = False
self.__log.debug('Setting browser user dirs...')
if self.__browser_choice == Browser.CHROME:
self.__browser_options = webdriver.ChromeOptions()
if self.__platform == 'windows':
self.__browser_user_dir = os.path.join(
os.environ['USERPROFILE'], 'Appdata', 'Local', 'Google', 'Chrome', 'User Data')
elif self.__platform == 'linux':
self.__browser_user_dir = os.path.join(os.environ['HOME'], '.config', 'google-chrome')
elif self.__browser_choice == Browser.FIREFOX:
self.__browser_options = webdriver.FirefoxOptions()
if self.__platform == 'windows':
self.__browser_user_dir = os.path.join(os.environ['APPDATA'], 'Mozilla', 'Firefox', 'Profiles')
self.__browser_profile_list = os.listdir(self.__browser_user_dir)
elif self.__platform == 'linux':
self.__browser_user_dir = os.path.join(os.environ['HOME'], '.mozilla', 'firefox')
self.__log.debug('Browser user dirs set.')
self.__browser_options.headless = True
self.__refresh_profile_list()
def __get_cookies(self) -> dict:
self.__log.debug('Executing getCookies function...')
cookie_list = self.__driver.execute_script('''
return document.cookie.split("; ");
''')
cookie_dict = {}
for cookie in cookie_list:
if len(cookie) == 0:
continue
cookie = cookie.split("=", maxsplit=1)
cookie_dict[cookie[0]] = cookie[1]
return cookie_dict
def __set_cookies(self, cookie_dict: dict[str, str]) -> NoReturn:
cookie_string = ""
for key, value in cookie_dict.items():
cookie_string += f"{key}={value}; "
self.__driver.execute_script('''
document.cookie = arguments[0];
''', cookie_string)
def __get_local_storage(self) -> 'dict[str, str]':
self.__log.debug('Executing getLocalStorage function...')
return self.__driver.execute_script('''
var localStorageDict = {};
var ls = window.localStorage;
for (var i = 0; i < ls.length; i++) {
localStorageDict[ls.key(i)] = ls.getItem(ls.key(i));
}
return localStorageDict;
''')
def __set_local_storage(self, local_storage_dict: 'dict[str, str]') -> NoReturn:
for ls_key, ls_val in local_storage_dict.items():
self.__driver.execute_script('window.localStorage.setItem(arguments[0], arguments[1]);',
ls_key, ls_val)
def __get_indexed_db(self) -> IndexedDB:
idb_dict = {'url': self.__session.get_url(), 'databases': {}}
idb_db_names = self.__session.get_idb_db_names(self.__driver)
if self.__session.idb_special_treatment:
self.__log.info("IDB special treatment required.")
idb_st_layout = self.__session.get_idb_st_layout()
else:
idb_st_layout = None
self.__log.debug('Executing getIndexedDb function...')
# FIXME: Use driver.execute_async_script() in the future
self.__driver.execute_script('''
document.pySessionHandler = {};
document.pySessionHandler.idbObject = {};
document.pySessionHandler.idbReady = 0;
document.pySessionHandler.idbNames = arguments[0];
document.pySessionHandler.idbStLayout = arguments[1];
// This could be so easy
// https://developer.mozilla.org/en-US/docs/Web/API/IDBFactory/databases#browser_compatibility
// indexedDB.databases();
async function getAllIndexedDBs() {
for (const dbName of document.pySessionHandler.idbNames) {
document.pySessionHandler.idbObject[dbName] = {};
document.pySessionHandler.db = await new Promise((resolve, reject) => {
document.pySessionHandler.openRequest = indexedDB.open(dbName);
document.pySessionHandler.openRequest.onsuccess = _ => resolve(
document.pySessionHandler.openRequest.result
);
});
document.pySessionHandler.idbObject[dbName]['name'] = document.pySessionHandler.db.name;
document.pySessionHandler.idbObject[dbName]['version'] = document.pySessionHandler.db.version;
document.pySessionHandler.idbObject[dbName]['objectStores'] = {};
for (const objectStoreName of document.pySessionHandler.db.objectStoreNames) {
document.pySessionHandler.idbObject[dbName]['objectStores'][objectStoreName] = {};
document.pySessionHandler.osTransaction = document.pySessionHandler.db.transaction(
objectStoreName
);
document.pySessionHandler.objectStore = document.pySessionHandler.osTransaction.objectStore(
objectStoreName
);
document.pySessionHandler.idbObject[dbName]['objectStores'][objectStoreName] = {};
document.pySessionHandler.idbObject[dbName]['objectStores'][objectStoreName]['name'] =
objectStoreName;
document.pySessionHandler.idbObject[dbName]['objectStores'][objectStoreName]['indices'] = {};
for (const idbIndexName of Array.from(document.pySessionHandler.objectStore.indexNames)) {
idbIndex = document.pySessionHandler.objectStore.index(idbIndexName);
document.pySessionHandler.idbObject[dbName]['objectStores'][objectStoreName]['indices'][
idbIndex.name
] = {'unique': idbIndex.unique, 'keyPath': idbIndex.keyPath, 'multiEntry': idbIndex.multiEntry};
}
document.pySessionHandler.idbObject[dbName]['objectStores'][objectStoreName]['keyPath'] =
document.pySessionHandler.objectStore.keyPath;
document.pySessionHandler.idbObject[dbName]['objectStores'][objectStoreName]['autoIncrement'] =
document.pySessionHandler.objectStore.autoIncrement;
document.pySessionHandler.osName = objectStoreName;
if (document.pySessionHandler.idbStLayout != null &&
document.pySessionHandler.idbStLayout[dbName] != undefined &&
document.pySessionHandler.idbStLayout[dbName][document.pySessionHandler.osName] != undefined) {
document.pySessionHandler.idbObject[dbName]['objectStores'][document.pySessionHandler.osName]['data']
= [];
}
else {
document.pySessionHandler.idbObject[dbName]['objectStores'][document.pySessionHandler.osName]['data']
= await new Promise((resolve, reject) => {
document.pySessionHandler.osGetAllRequest = document.pySessionHandler.objectStore.getAll();
document.pySessionHandler.osGetAllRequest.onsuccess =
_ => resolve(document.pySessionHandler.osGetAllRequest.result);
});
}
}
document.pySessionHandler.db.close();
document.pySessionHandler.idbReady++;
}
}
getAllIndexedDBs();
''', idb_db_names, idb_st_layout)
self.__log.debug('Waiting until IDB operation is done...')
while not self.__driver.execute_script(
f'return document.pySessionHandler.idbReady == {len(idb_db_names)};'):
time.sleep(1)
self.__log.debug('Getting IDB results...')
idb_dict['databases'] = self.__driver.execute_script(
'return document.pySessionHandler.idbObject;')
self.__driver.execute_script('document.pySessionHandler = {};')
if idb_st_layout is not None:
self.__log.info("Running special actions...")
st_data = self.__session.do_idb_st_get_action(self.__driver)
for idb_st_db, idb_st_os_list in idb_st_layout.items():
for idb_st_os in idb_st_os_list:
idb_dict['databases'][idb_st_db]['objectStores'][idb_st_os]['data'] = st_data[idb_st_db][idb_st_os]
return IndexedDB.create_from_dict(idb_dict)
def __set_indexed_db(self, idb: IndexedDB) -> NoReturn:
self.__log.debug('Inserting setIDBObjects function...')
self.__driver.execute_script('''
document.pySessionHandler = {};
// Reference PoC: https://github.com/jeliebig/WaWebSessionHandler/issues/15#issuecomment-893716129
// PoC by: https://github.com/thewh1teagle
document.pySessionHandler.setAllObjects = async function (idb_data, stLayout) {
idb_dbs = idb_data['databases'];
for (const [idbDbName, idbDbProps] of Object.entries(idb_dbs)) {
await new Promise((resolve, reject) => {
deleteRequest = indexedDB.deleteDatabase(idbDbName);
deleteRequest.onsuccess = _ => resolve(_);
deleteRequest.onerror = _ => resolve(_);
});
await new Promise((resolve, reject) => {
openRequest = indexedDB.open(idbDbName, idbDbProps['version']);
openRequest.onupgradeneeded = async function(event) {
db = event.target.result;
for (const [idbOsName, idbOsProps] of Object.entries(idbDbProps['objectStores'])) {
console.log("OS:", idbOsName, idbOsProps);
objectStoreOptions = {
autoIncrement: idbOsProps['autoIncrement']
};
if (idbOsProps['keyPath'].length > 0){
objectStoreOptions['keyPath'] = (idbOsProps['keyPath'].length == 1 ?
idbOsProps['keyPath'].join('') : idbOsProps['keyPath']);
}
objectStore = db.createObjectStore(idbOsName, objectStoreOptions);
containsUniqueIndex = false;
for (const [idbIndexName, idbIndexOptions] of Object.entries(idbOsProps['indices'])) {
if (idbIndexOptions['unique']) {
containsUniqueIndex = true;
}
objectStore.createIndex(idbIndexName, idbIndexOptions['keyPath'],{
unique: idbIndexOptions['unique'],
multiEntry: idbIndexOptions['multiEntry']
});
}
if (!(stLayout != null &&
stLayout[idbDbName] != undefined && stLayout[idbDbName][idbOsName] != undefined)) {
i = 1;
for (const idbOsData of idbOsProps['data']) {
if (containsUniqueIndex || idbOsProps['keyPath'].length > 0) {
await new Promise((dResolve, dReject) => {
addRequest = objectStore.add(idbOsData);
addRequest.onsuccess = _ => dResolve();
});
}
else {
await new Promise((dResolve, dReject) => {
addRequest = objectStore.add(idbOsData, i);
addRequest.onsuccess = _ => dResolve();
});
}
i++;
}
}
}
db.close();
resolve();
}
});
}
};
document.pySessionHandler.setAllObjectsAsync = async function(idb_data, stLayout, resolve) {
console.log(idb_data, stLayout);
await document.pySessionHandler.setAllObjects(idb_data, stLayout);
resolve();
};
''')
self.__log.debug('setIDBObjects function inserted.')
self.__log.info('Writing IDB data...')
self.__driver.execute_async_script('''
var callback = arguments[arguments.length - 1];
document.pySessionHandler.setAllObjectsAsync(arguments[0], arguments[1], callback);
''', idb.as_dict(), self.__session.get_idb_st_layout() if self.__session.idb_special_treatment else None)
if self.__session.idb_special_treatment:
self.__log.info("IDB special treatment required. Running special actions...")
st_layout = self.__session.get_idb_st_layout()
st_data = {}
for st_db, st_os_list in st_layout.items():
st_data[st_db] = {}
for st_os in st_os_list:
st_data[st_db][st_os] = idb.get_db(st_db).get_object_store(st_os).get_data()
self.__session.do_idb_st_set_action(self.__driver, st_data)
self.__log.info("Finished writing data to IDB!")
def __verify_profile_name_exists(self, profile_name: str) -> bool:
if self.__custom_driver:
raise AssertionError('Do not call this method if you are using a custom webdriver.')
# NOTE: Is this still required?
if not isinstance(profile_name, str):
raise TypeError('The provided profile_name is not a string.')
        return profile_name in self.__browser_profile_list
def __wait_for_login(self, timeout=120):
login_success = True
self.__log.info('Waiting for login... [Timeout: %ss]', timeout)
# TODO: rewrite this for the general approach
self.__log.debug(f'Waiting until {self.__session.get_name()} finished loading...')
try:
            WebDriverWait(self.__driver, timeout).until(
ec.visibility_of_element_located((By.TAG_NAME, 'h1'))
)
self.__log.info('Login completed.')
except TimeoutException:
login_success = False
self.__log.error('Login was not completed in time. Aborting...')
return login_success
def __start_session(self, options: Optional[Union[c_op.Options, f_op.Options]] = None,
profile_name: Optional[str] = None, wait_for_login=True) -> NoReturn:
if not self.__custom_driver and options is None:
raise ValueError("Do not call this method without providing options for the webdriver.")
if profile_name is None:
if not self.__custom_driver:
self.__log.info('Starting browser... [HEADLESS: %s]', str(options.headless))
if self.__browser_choice == Browser.CHROME:
self.__driver = webdriver.Chrome(options=options)
elif self.__browser_choice == Browser.FIREFOX:
self.__driver = webdriver.Firefox(options=options)
else:
self.__log.debug('Checking if current browser window can be used...')
if self.__browser_choice == Browser.CHROME:
if self.__driver.current_url != 'chrome://new-tab-page/' and self.__driver.current_url != 'data:,':
self.__driver.execute_script('window.open()')
self.__driver.switch_to.window(self.__driver.window_handles[-1])
elif self.__browser_choice == Browser.FIREFOX:
if self.__driver.current_url != "about:blank":
self.__driver.execute_script('window.open()')
self.__driver.switch_to.window(self.__driver.window_handles[-1])
self.__log.info(f'Loading {self.__session.get_name()}...')
self.__driver.get(self.__session.get_url())
if wait_for_login:
if not self.__wait_for_login():
return
else:
self.__log.info('Starting browser... [HEADLESS: %s]', str(options.headless))
if self.__browser_choice == Browser.CHROME:
options.add_argument('user-data-dir=%s' % os.path.join(self.__browser_user_dir, profile_name))
self.__driver = webdriver.Chrome(options=options)
elif self.__browser_choice == Browser.FIREFOX:
fire_profile = webdriver.FirefoxProfile(os.path.join(self.__browser_user_dir, profile_name))
self.__driver = webdriver.Firefox(fire_profile, options=options)
self.__log.info(f'Loading {self.__session.get_name()}...')
self.__driver.get(self.__session.get_url())
def __start_visible_session(self, profile_name: Optional[str] = None, wait_for_login=True) -> NoReturn:
options = self.__browser_options
options.headless = False
if profile_name is not None:
self.__verify_profile_name_exists(profile_name)
self.__start_session(options, profile_name, wait_for_login)
def __start_invisible_session(self, profile_name: Optional[str] = None) -> NoReturn:
if profile_name is not None:
self.__verify_profile_name_exists(profile_name)
self.__start_session(self.__browser_options, profile_name)
def __get_profile_session(self, profile_name: Optional[str] = None) -> SessionObject:
if profile_name is None:
if self.__custom_driver:
self.__start_session()
else:
self.__start_visible_session()
else:
self.__start_invisible_session(profile_name)
cookies = self.__get_cookies()
local_storage = self.__get_local_storage()
indexed_db = self.__get_indexed_db()
if not self.__custom_driver:
self.__log.info("Closing browser...")
self.__driver.quit()
else:
self.__log.info("Closing tab...")
self.__driver.close()
self.__driver.switch_to.window(self.__driver.window_handles[-1])
return SessionObject(self.__session.get_name(), self.__session.get_url(), self.__session.get_file_ext(),
cookies, local_storage, indexed_db)
# FIXME: get and set methods do very different things
def __set_profile_session(self, session_object: SessionObject) -> NoReturn:
self.__set_cookies(session_object.cookies)
self.__set_local_storage(session_object.local_storage)
self.__set_indexed_db(session_object.indexed_db)
self.__log.info(f'Reloading {self.__session.get_name()}...')
self.__driver.refresh()
def __init__(self, session_class: SessionObject,
browser: Optional[Union[Browser, str]] = None,
driver: Optional[Union[c_wd.WebDriver, f_wd.WebDriver]] = None):
self.__log = logging.getLogger('SessionHandler')
self.__log.setLevel(logging.DEBUG)
self.__platform = platform.system().lower()
if self.__platform != 'windows' and self.__platform != 'linux':
raise NotImplementedError('Only Windows and Linux are supported for now.')
self.__log.debug('Detected platform: %s', self.__platform)
self.__session = session_class
if driver:
self.set_custom_webdriver(driver)
else:
if browser:
self.set_browser(browser)
else:
raise ValueError('Parameter browser is empty.\n'
'You need to set a browser or a custom driver during init.')
self.__init_browser()
def set_custom_webdriver(self, driver: Union[c_wd.WebDriver, f_wd.WebDriver]) -> NoReturn:
if isinstance(driver, c_wd.WebDriver):
self.__browser_choice = Browser.CHROME
elif isinstance(driver, f_wd.WebDriver):
self.__browser_choice = Browser.FIREFOX
self.__custom_driver = True
self.__driver = driver
def set_browser(self, browser: Union[Browser, str]) -> NoReturn:
if self.__driver is not None:
self.__driver.quit()
if isinstance(browser, str):
if browser.lower() == 'chrome':
self.__log.debug('Setting browser... [TYPE: %s]', 'Chrome')
self.__browser_choice = Browser.CHROME
elif browser.lower() == 'firefox':
self.__log.debug('Setting browser... [TYPE: %s]', 'Firefox')
self.__browser_choice = Browser.FIREFOX
else:
raise ValueError('The specified browser is invalid. Try to use "chrome" or "firefox" instead.')
elif isinstance(browser, Browser):
if browser == Browser.CHROME:
self.__log.debug('Setting browser... [TYPE: %s]', 'Chrome')
elif browser == Browser.FIREFOX:
self.__log.debug('Setting browser... [TYPE: %s]', 'Firefox')
self.__browser_choice = browser
else:
# NOTE: This shouldn't be needed anymore.
raise TypeError(
'Type of browser invalid. Please use Browser.CHROME or Browser.FIREFOX instead.'
)
self.__init_browser()
# TODO: Think about type aliasing
def get_active_session(self, use_profile: Optional[Union['list[str]', str]] = None, all_profiles=False) -> Union[
'dict[str, SessionObject]', 'SessionObject'
]:
self.__log.info('Make sure the specified browser profile is not being used by another process.')
profile_storage_dict = {}
use_profile_list = []
self.__refresh_profile_list()
if self.__custom_driver:
raise AssertionError('Do not call this method if you are using a custom webdriver.')
if all_profiles:
use_profile_list.extend(self.__browser_profile_list)
self.__log.info(
'Trying to get active sessions for all browser profiles of the selected type...'
)
else:
            if use_profile is None:
                return self.__get_profile_session()
            elif isinstance(use_profile, list):
                use_profile_list.extend(use_profile)
            elif isinstance(use_profile, str) and use_profile in self.__browser_profile_list:
                use_profile_list.append(use_profile)
            elif isinstance(use_profile, str):
                raise ValueError(f'Profile does not exist: {use_profile}')
            else:
                raise ValueError(
                    'Invalid profile provided. Make sure you provided a list of profiles or a profile name.'
                )
for profile in use_profile_list:
profile_storage_dict[profile] = self.__get_profile_session(profile)
return profile_storage_dict
def create_new_session(self) -> 'SessionObject':
return self.__get_profile_session()
def open_session(self) -> SessionObject:
if not self.__custom_driver:
self.__start_visible_session(wait_for_login=False)
else:
self.__start_session(wait_for_login=False)
self.__set_profile_session(self.__session)
return_session = SessionObject(
self.__session.get_name(),
self.__session.get_url(),
self.__session.get_file_ext(),
self.__get_cookies(),
self.__get_local_storage(),
self.__get_indexed_db()
)
if not self.__custom_driver:
self.__log.info('Do not reload the page manually.')
self.__log.info('Waiting until the browser window is closed...')
while True:
try:
_ = self.__driver.current_window_handle
time.sleep(1)
except WebDriverException:
break
return return_session
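# Usage sketch (illustrative, not part of the original module): assumes the handler class
# defined above is exported as `SessionHandler` and that a concrete SessionObject
# implementation (here called `MySession`) is provided elsewhere in this project.
#   handler = SessionHandler(MySession(), browser=Browser.FIREFOX)
#   session = handler.open_session()                      # restore the stored session and wait
#   sessions = handler.get_active_session(all_profiles=True)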
|
the-stack_0_26767
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A binary for generating samples given a folder of .wav files or encodings."""
import os
from magenta.models.nsynth import utils
from magenta.models.nsynth.wavenet import fastgen
import tensorflow.compat.v1 as tf # noqa
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("source_path", "", "Path to directory with either "
".wav files or precomputed encodings in .npy files."
"If .wav files are present, use wav files. If no "
".wav files are present, use .npy files")
tf.app.flags.DEFINE_boolean("npy_only", False, "If True, use only .npy files.")
tf.app.flags.DEFINE_string("save_path", "", "Path to output file dir.")
tf.app.flags.DEFINE_string("checkpoint_path", "model.ckpt-200000",
"Path to checkpoint.")
tf.app.flags.DEFINE_integer("sample_length", 64000,
"Max output file size in samples.")
tf.app.flags.DEFINE_integer("batch_size", 1, "Number of samples per a batch.")
tf.app.flags.DEFINE_string("log", "INFO",
"The threshold for what messages will be logged."
"DEBUG, INFO, WARN, ERROR, or FATAL.")
tf.app.flags.DEFINE_integer("gpu_number", 0,
"Number of the gpu to use for multigpu generation.")
def main(unused_argv=None):
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu_number)
source_path = utils.shell_path(FLAGS.source_path)
checkpoint_path = utils.shell_path(FLAGS.checkpoint_path)
save_path = utils.shell_path(FLAGS.save_path)
if not save_path:
raise ValueError("Must specify a save_path.")
tf.logging.set_verbosity(FLAGS.log)
# Use directory of files
if tf.gfile.IsDirectory(source_path):
files = tf.gfile.ListDirectory(source_path)
file_extensions = [os.path.splitext(f)[1] for f in files]
if ".wav" in file_extensions:
file_extension = ".wav"
elif ".npy" in file_extensions:
file_extension = ".npy"
else:
raise RuntimeError("Folder must contain .wav or .npy files.")
file_extension = ".npy" if FLAGS.npy_only else file_extension
files = sorted([
os.path.join(source_path, fname)
for fname in files
if fname.lower().endswith(file_extension)
])
# Use a single file
elif source_path.lower().endswith((".wav", ".npy")):
file_extension = os.path.splitext(source_path.lower())[1]
files = [source_path]
else:
raise ValueError(
"source_path {} must be a folder or file.".format(source_path))
# Now synthesize from files one batch at a time
batch_size = FLAGS.batch_size
sample_length = FLAGS.sample_length
n = len(files)
for start in range(0, n, batch_size):
end = start + batch_size
batch_files = files[start:end]
save_names = [
os.path.join(save_path,
"gen_" + os.path.splitext(os.path.basename(f))[0] + ".wav")
for f in batch_files
]
# Encode waveforms
if file_extension == ".wav":
batch_data = fastgen.load_batch_audio(
batch_files, sample_length=sample_length)
encodings = fastgen.encode(
batch_data, checkpoint_path, sample_length=sample_length)
# Or load encodings
else:
encodings = fastgen.load_batch_encodings(
batch_files, sample_length=sample_length)
# Synthesize multi-gpu
if FLAGS.gpu_number != 0:
with tf.device("/device:GPU:%d" % FLAGS.gpu_number):
fastgen.synthesize(
encodings, save_names, checkpoint_path=checkpoint_path)
# Single gpu
else:
fastgen.synthesize(
encodings, save_names, checkpoint_path=checkpoint_path)
def console_entry_point():
tf.app.run(main)
if __name__ == "__main__":
console_entry_point()
|
the-stack_0_26769
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2022 the AAS WorldWide Telescope project.
# Licensed under the MIT License.
"""
Entrypoints for the "toasty pipeline" command-line tools.
"""
__all__ = """
pipeline_getparser
pipeline_impl
""".split()
from fnmatch import fnmatch
import glob
import os.path
import sys
from wwt_data_formats.cli import EnsureGlobsExpandedAction
from ..cli import die
from . import NotActionableError
def evaluate_imageid_args(searchdir, args):
"""
Figure out which image-ID's to process.
"""
matched_ids = set()
globs_todo = set()
for arg in args:
if glob.has_magic(arg):
globs_todo.add(arg)
else:
            # If an ID is explicitly (non-globbily) added, always add it to the
# list, without checking if it exists in `searchdir`. We could check
# for it in searchdir now, but we'll have to check later anyway, so
# we don't bother.
matched_ids.add(arg)
if len(globs_todo):
for filename in os.listdir(searchdir):
for g in globs_todo:
if fnmatch(filename, g):
matched_ids.add(filename)
break
return sorted(matched_ids)
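# Illustrative call (hypothetical IDs): explicitly named IDs pass through unchecked,
# while glob patterns are matched against the directory listing of `searchdir`:
#   evaluate_imageid_args("processed", ["apod_210101", "heic*"])
#   # -> ["apod_210101", "heic2007a", ...] depending on what is present in `processed`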
# The "approve" subcommand
def approve_setup_parser(parser):
parser.add_argument(
"--workdir",
metavar="PATH",
default=".",
help="The working directory for this processing session",
)
parser.add_argument(
"cand_ids",
nargs="+",
action=EnsureGlobsExpandedAction,
metavar="IMAGE-ID",
help="Name(s) of image(s) to approve for publication (globs accepted)",
)
def approve_impl(settings):
from wwt_data_formats.folder import Folder, make_absolutizing_url_mutator
from . import PipelineManager
mgr = PipelineManager(settings.workdir)
mgr.ensure_config()
pub_url_prefix = mgr._config.get("publish_url_prefix")
if pub_url_prefix:
if pub_url_prefix[-1] != "/":
pub_url_prefix += "/"
proc_dir = mgr._ensure_dir("processed")
app_dir = mgr._ensure_dir("approved")
for cid in evaluate_imageid_args(proc_dir, settings.cand_ids):
if not os.path.isdir(os.path.join(proc_dir, cid)):
die(f"no such processed candidate ID {cid!r}")
index_path = os.path.join(proc_dir, cid, "index.wtml")
prefix = pub_url_prefix + cid + "/"
try:
f = Folder.from_file(os.path.join(proc_dir, cid, "index_rel.wtml"))
f.mutate_urls(make_absolutizing_url_mutator(prefix))
with open(index_path, "wt", encoding="utf8") as f_out:
f.write_xml(f_out)
except Exception as e:
print(
"error: failed to create index.wtml from index_rel.wtml",
file=sys.stderr,
)
try:
os.remove(index_path)
except Exception:
pass
raise
os.rename(os.path.join(proc_dir, cid), os.path.join(app_dir, cid))
# The "fetch" subcommand
def fetch_setup_parser(parser):
parser.add_argument(
"--workdir",
metavar="PATH",
default=".",
help="The working directory for this processing session",
)
parser.add_argument(
"cand_ids",
nargs="+",
action=EnsureGlobsExpandedAction,
metavar="CAND-ID",
help="Name(s) of candidate(s) to fetch and prepare for processing (globs accepted)",
)
def fetch_impl(settings):
from . import PipelineManager
mgr = PipelineManager(settings.workdir)
cand_dir = mgr._ensure_dir("candidates")
rej_dir = mgr._ensure_dir("rejects")
src = mgr.get_image_source()
for cid in evaluate_imageid_args(cand_dir, settings.cand_ids):
# Funky structure here is to try to ensure that cdata is closed in case
# a NotActionable happens, so that we can move the directory on Windows.
try:
try:
cdata = open(os.path.join(cand_dir, cid), "rb")
except FileNotFoundError:
die(f"no such candidate ID {cid!r}")
try:
print(f"fetching {cid} ... ", end="")
sys.stdout.flush()
cachedir = mgr._ensure_dir("cache_todo", cid)
src.fetch_candidate(cid, cdata, cachedir)
print("done")
finally:
cdata.close()
except NotActionableError as e:
print("not usable:", e)
os.rename(os.path.join(cand_dir, cid), os.path.join(rej_dir, cid))
os.rmdir(cachedir)
# The "init" subcommand
def init_setup_parser(parser):
parser.add_argument(
"--azure-conn-env",
metavar="ENV-VAR-NAME",
help="The name of an environment variable contain an Azure Storage "
"connection string",
)
parser.add_argument(
"--azure-container",
metavar="CONTAINER-NAME",
help="The name of a blob container in the Azure storage account",
)
parser.add_argument(
"--azure-path-prefix",
metavar="PATH-PREFIX",
help="A slash-separated path prefix for blob I/O within the container",
)
parser.add_argument(
"--local", metavar="PATH", help="Use the local-disk I/O backend"
)
parser.add_argument(
"workdir",
nargs="?",
metavar="PATH",
default=".",
help="The working directory for this processing session",
)
def _pipeline_io_from_settings(settings):
from . import azure_io, local_io
if settings.local:
return local_io.LocalPipelineIo(settings.local)
if settings.azure_conn_env:
conn_str = os.environ.get(settings.azure_conn_env)
if not conn_str:
die(
"--azure-conn-env=%s provided, but that environment variable is unset"
% settings.azure_conn_env
)
if not settings.azure_container:
die("--azure-container-name must be provided if --azure-conn-env is")
path_prefix = settings.azure_path_prefix
if not path_prefix:
path_prefix = ""
azure_io.assert_enabled()
return azure_io.AzureBlobPipelineIo(
conn_str, settings.azure_container, path_prefix
)
die("An I/O backend must be specified with the arguments --local or --azure-*")
def init_impl(settings):
pipeio = _pipeline_io_from_settings(settings)
os.makedirs(settings.workdir, exist_ok=True)
pipeio.save_config(os.path.join(settings.workdir, "toasty-store-config.yaml"))
# The "refresh" subcommand
#
# TODO: for large feeds, we should potentially add features to make it so that
# we don't re-check every single candidate that's ever been posted.
def refresh_setup_parser(parser):
parser.add_argument(
"--workdir",
nargs="?",
metavar="PATH",
default=".",
help="The working directory for this processing session",
)
def refresh_impl(settings):
from . import PipelineManager
mgr = PipelineManager(settings.workdir)
cand_dir = mgr._ensure_dir("candidates")
rej_dir = mgr._ensure_dir("rejects")
src = mgr.get_image_source()
n_cand = 0
n_saved = 0
n_done = 0
n_skipped = 0
n_rejected = 0
for cand in src.query_candidates():
n_cand += 1
uniq_id = cand.get_unique_id()
if mgr._pipeio.check_exists(uniq_id, "index.wtml"):
n_done += 1
continue # skip already-done inputs
if mgr._pipeio.check_exists(uniq_id, "skip.flag"):
n_skipped += 1
continue # skip inputs that are explicitly flagged
cand_path = os.path.join(cand_dir, uniq_id)
try:
with open(cand_path, "wb") as f:
cand.save(f)
n_saved += 1
except NotActionableError as e:
os.remove(cand_path)
with open(os.path.join(rej_dir, uniq_id, "wb")) as f:
pass # for now, just touch the file
n_rejected += 1
print(f"analyzed {n_cand} candidates from the image source")
print(f" - {n_saved} processing candidates saved")
print(f" - {n_rejected} rejected as definitely unusable")
print(f" - {n_done} were already done")
print(f" - {n_skipped} were already marked to be ignored")
print()
print("See the `candidates` directory for candidate image IDs.")
# Other subcommands not yet split out.
def pipeline_getparser(parser):
subparsers = parser.add_subparsers(dest="pipeline_command")
def add_manager_command(name):
subp = subparsers.add_parser(name)
subp.add_argument(
"--workdir",
nargs="?",
metavar="WORKDIR",
default=".",
help="The local working directory",
)
return subp
approve_setup_parser(subparsers.add_parser("approve"))
fetch_setup_parser(subparsers.add_parser("fetch"))
add_manager_command("ignore-rejects")
init_setup_parser(subparsers.add_parser("init"))
add_manager_command("process-todos")
add_manager_command("publish")
refresh_setup_parser(subparsers.add_parser("refresh"))
def pipeline_impl(settings):
from . import PipelineManager
if settings.pipeline_command is None:
print('Run the "pipeline" command with `--help` for help on its subcommands')
return
if settings.pipeline_command == "approve":
approve_impl(settings)
elif settings.pipeline_command == "fetch":
fetch_impl(settings)
elif settings.pipeline_command == "ignore-rejects":
mgr = PipelineManager(settings.workdir)
mgr.ignore_rejects()
elif settings.pipeline_command == "init":
init_impl(settings)
elif settings.pipeline_command == "process-todos":
mgr = PipelineManager(settings.workdir)
mgr.process_todos()
elif settings.pipeline_command == "publish":
mgr = PipelineManager(settings.workdir)
mgr.publish()
elif settings.pipeline_command == "refresh":
refresh_impl(settings)
else:
die('unrecognized "pipeline" subcommand ' + settings.pipeline_command)
|
the-stack_0_26770
|
"""The Nextcloud integration."""
from datetime import timedelta
import logging
from nextcloudmonitor import NextcloudMonitor, NextcloudMonitorError
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_URL,
CONF_USERNAME,
)
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
DOMAIN = "nextcloud"
NEXTCLOUD_COMPONENTS = ("sensor", "binary_sensor")
SCAN_INTERVAL = timedelta(seconds=60)
# Validate user configuration
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_URL): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period,
}
)
},
extra=vol.ALLOW_EXTRA,
)
BINARY_SENSORS = (
"nextcloud_system_enable_avatars",
"nextcloud_system_enable_previews",
"nextcloud_system_filelocking.enabled",
"nextcloud_system_debug",
)
SENSORS = (
"nextcloud_system_version",
"nextcloud_system_theme",
"nextcloud_system_memcache.local",
"nextcloud_system_memcache.distributed",
"nextcloud_system_memcache.locking",
"nextcloud_system_freespace",
"nextcloud_system_cpuload",
"nextcloud_system_mem_total",
"nextcloud_system_mem_free",
"nextcloud_system_swap_total",
"nextcloud_system_swap_free",
"nextcloud_system_apps_num_installed",
"nextcloud_system_apps_num_updates_available",
"nextcloud_system_apps_app_updates_calendar",
"nextcloud_system_apps_app_updates_contacts",
"nextcloud_system_apps_app_updates_tasks",
"nextcloud_system_apps_app_updates_twofactor_totp",
"nextcloud_storage_num_users",
"nextcloud_storage_num_files",
"nextcloud_storage_num_storages",
"nextcloud_storage_num_storages_local",
"nextcloud_storage_num_storages_home",
"nextcloud_storage_num_storages_other",
"nextcloud_shares_num_shares",
"nextcloud_shares_num_shares_user",
"nextcloud_shares_num_shares_groups",
"nextcloud_shares_num_shares_link",
"nextcloud_shares_num_shares_mail",
"nextcloud_shares_num_shares_room",
"nextcloud_shares_num_shares_link_no_password",
"nextcloud_shares_num_fed_shares_sent",
"nextcloud_shares_num_fed_shares_received",
"nextcloud_shares_permissions_3_1",
"nextcloud_server_webserver",
"nextcloud_server_php_version",
"nextcloud_server_php_memory_limit",
"nextcloud_server_php_max_execution_time",
"nextcloud_server_php_upload_max_filesize",
"nextcloud_database_type",
"nextcloud_database_version",
"nextcloud_database_version",
"nextcloud_activeUsers_last5minutes",
"nextcloud_activeUsers_last1hour",
"nextcloud_activeUsers_last24hours",
)
def setup(hass, config):
"""Set up the Nextcloud integration."""
# Fetch Nextcloud Monitor api data
conf = config[DOMAIN]
try:
ncm = NextcloudMonitor(conf[CONF_URL], conf[CONF_USERNAME], conf[CONF_PASSWORD])
except NextcloudMonitorError:
_LOGGER.error("Nextcloud setup failed - Check configuration")
hass.data[DOMAIN] = get_data_points(ncm.data)
hass.data[DOMAIN]["instance"] = conf[CONF_URL]
def nextcloud_update(event_time):
"""Update data from nextcloud api."""
try:
ncm.update()
except NextcloudMonitorError:
_LOGGER.error("Nextcloud update failed")
return False
hass.data[DOMAIN] = get_data_points(ncm.data)
hass.data[DOMAIN]["instance"] = conf[CONF_URL]
# Update sensors on time interval
track_time_interval(hass, nextcloud_update, conf[CONF_SCAN_INTERVAL])
for component in NEXTCLOUD_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
# Use recursion to create list of sensors & values based on nextcloud api data
def get_data_points(api_data, key_path="", leaf=False):
"""Use Recursion to discover data-points and values.
Get dictionary of data-points by recursing through dict returned by api until
the dictionary value does not contain another dictionary and use the
resulting path of dictionary keys and resulting value as the name/value
for the data-point.
returns: dictionary of data-point/values
"""
result = {}
for key, value in api_data.items():
if isinstance(value, dict):
if leaf:
key_path = f"{key}_"
if not leaf:
key_path += f"{key}_"
leaf = True
result.update(get_data_points(value, key_path, leaf))
else:
result[f"{DOMAIN}_{key_path}{key}"] = value
leaf = False
return result
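# Illustrative flattening (made-up values, not real Nextcloud output):
#   get_data_points({"system": {"version": "20.0.1", "cpuload": 0.2}})
#   # -> {"nextcloud_system_version": "20.0.1", "nextcloud_system_cpuload": 0.2}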
|
the-stack_0_26773
|
# -*- coding: utf-8 -*-
## \package globals.stage
# MIT licensing
# See: docs/LICENSE.txt
import os, shutil
from globals.application import APP_name
from globals.application import VERSION_string
from globals.dateinfo import GetDate
from globals.dateinfo import dtfmt
from globals.strings import GS
## Creates a directory for storing temporary files
#
# \return
# Path to new stage directory, or None if failed
def CreateStage():
stage = u'/tmp'
# Use current working directory if no write access to /tmp
if not os.access(stage, os.W_OK):
stage = os.getcwd()
#suffix = u'{}{}{}_'.format(GetYear(), GetMonthInt(), GetDayInt())
#suffix = u'_temp'
suffix = GetDate(dtfmt.STAMP)
stage = u'{}/{}-{}_{}'.format(stage, GS(APP_name).lower(), VERSION_string, suffix)
if os.access(os.path.dirname(stage), os.W_OK):
# Start with fresh directory
if os.path.isdir(stage):
shutil.rmtree(stage)
elif os.path.isfile(stage):
os.remove(stage)
os.makedirs(stage)
if os.path.isdir(stage):
return stage
## Remove a previously created stage directory
#
# \param stage
# Absolute path to directory to remove
# \return
# \b \e True if stage does not exist
def RemoveStage(stage):
if os.access(stage, os.W_OK):
shutil.rmtree(stage)
return not os.path.exists(stage)
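## Usage sketch (illustrative only): create a stage, work inside it, then remove it
#
#  stage = CreateStage()
#  if stage:
#      # ... write temporary build files under `stage` ...
#      print(RemoveStage(stage)) # True once the directory is gone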
|
the-stack_0_26780
|
# ===============================================================================
# Copyright 2017 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from traits.api import Array
from chaco.abstract_overlay import AbstractOverlay
from uncertainties import std_dev, nominal_value
from numpy import array
from pychron.core.stats import calculate_weighted_mean
from pychron.pipeline.plot.plotter.arar_figure import BaseArArFigure
from six.moves import zip
class RadialOverlay(AbstractOverlay):
xs = Array
ys = Array
def __init__(self, component, xs, ys, *args, **kw):
super(RadialOverlay, self).__init__(component, *args, **kw)
self._xs, self._ys = xs, ys
def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
print(self.component.datasources)
class Radial(BaseArArFigure):
def plot(self, plots, legend=None):
graph = self.graph
for pid, (plotobj, po) in enumerate(zip(graph.plots, plots)):
self._plot_radial(po, plotobj, pid)
def post_make(self):
g = self.graph
for i, p in enumerate(g.plots):
l, h = self.ymis[i], self.ymas[i]
print(i, p, l, h)
g.set_y_limits(l, h, pad='0.1', plotid=i)
def _plot_radial(self, po, plot, pid):
zs = array([nominal_value(a.uage) for a in self.analyses])
es = array([std_dev(a.uage) for a in self.analyses])
zm,_ = calculate_weighted_mean(zs, es)
zs = (zs - zm)/es
yma = max(abs(zs))
es = 1/es
# xs = array([1/std_dev(a.uage) for a in self.analyses])
# ys = array([nominal_value(a.uage)/(std_dev(a.uage)) for a in self.analyses])
try:
self.ymis[pid] = min(self.ymis[pid], -yma)
self.ymas[pid] = max(self.ymas[pid], yma)
except IndexError:
self.ymis.append(-yma)
self.ymas.append(yma)
# overlay = RadialOverlay(plot, xs=xs, ys=ys)
# plot.overlays.append(overlay)
s, _ = self.graph.new_series(es, zs, type='scatter')
self._add_scatter_inspector(s)
self.graph.set_x_limits(min_=0)
# self.graph.set_y_limits(min_=-a, max_=a, pad='0.1')
# ============= EOF =============================================
|
the-stack_0_26783
|
from copy import deepcopy
resolution = 10
grid = []
next = []
def setup():
global grid, next, cols, rows
size(600, 400)
this.surface.setTitle("Conways Game of Life")
cols = width / resolution
rows = height / resolution
grid = [[floor(random(2)) for _ in range(rows)] for _ in range(cols)]
next = [[0 for _ in range(rows)] for _ in range(cols)]
frameRate(30)
def draw():
global grid, next, cols, rows
background(255)
for i in range(cols):
for j in range(rows):
x = i * resolution
y = j * resolution
if grid[i][j] == 1:
fill(255, 0, 0)
stroke(0)
rect(x, y, resolution, resolution)
else:
fill(200)
stroke(0)
rect(x, y, resolution, resolution)
    # Next generation
for i in range(cols):
for j in range(rows):
state = grid[i][j]
neighbors = count_neighbors(grid, i, j)
if ((state == 0) and (neighbors == 3)):
next[i][j] = 1
elif ((state == 1) and (neighbors < 2 or neighbors > 3)):
next[i][j] = 0
else:
next[i][j] = state
grid = deepcopy(next)
def count_neighbors(li, x, y):
global cols, rows
sum = 0
for i in range(-1, 2):
for j in range(-1, 2):
col = (x + i + cols) % cols
row = (y + j + rows) % rows
sum += li[col][row]
sum -= li[x][y]
return(sum)
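# Worked example for count_neighbors (illustrative): on a 3x3 grid filled with ones and
# with cols = rows = 3, every cell has 8 live neighbours because the modulo arithmetic
# wraps the edges toroidally, e.g. count_neighbors([[1,1,1],[1,1,1],[1,1,1]], 0, 0) == 8.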
|
the-stack_0_26784
|
import torch
import math
irange = range
def make_grid(tensor, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0):
"""Make a grid of images.
Args:
tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size.
nrow (int, optional): Number of images displayed in each row of the grid.
The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
padding (int, optional): amount of padding. Default: ``2``.
normalize (bool, optional): If True, shift the image to the range (0, 1),
by the min and max values specified by :attr:`range`. Default: ``False``.
range (tuple, optional): tuple (min, max) where min and max are numbers,
then these numbers are used to normalize the image. By default, min and max
are computed from the tensor.
scale_each (bool, optional): If ``True``, scale each image in the batch of
images separately rather than the (min, max) over all images. Default: ``False``.
pad_value (float, optional): Value for the padded pixels. Default: ``0``.
Example:
See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
# if list of tensors, convert to a 4D mini-batch Tensor
if isinstance(tensor, list):
tensor = torch.stack(tensor, dim=0)
if tensor.dim() == 2: # single image H x W
tensor = tensor.unsqueeze(0)
if tensor.dim() == 3: # single image
if tensor.size(0) == 1: # if single-channel, convert to 3-channel
tensor = torch.cat((tensor, tensor, tensor), 0)
tensor = tensor.unsqueeze(0)
if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
tensor = torch.cat((tensor, tensor, tensor), 1)
if normalize is True:
tensor = tensor.clone() # avoid modifying tensor in-place
if range is not None:
assert isinstance(range, tuple), \
"range has to be a tuple (min, max) if specified. min and max are numbers"
def norm_ip(img, min, max):
img.clamp_(min=min, max=max)
img.add_(-min).div_(max - min + 1e-5)
def norm_range(t, range):
if range is not None:
norm_ip(t, range[0], range[1])
else:
norm_ip(t, float(t.min()), float(t.max()))
if scale_each is True:
for t in tensor: # loop over mini-batch dimension
norm_range(t, range)
else:
norm_range(tensor, range)
if tensor.size(0) == 1:
return tensor.squeeze(0)
# make the mini-batch of images into a grid
nmaps = tensor.size(0)
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
num_channels = tensor.size(1)
grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)
k = 0
for y in irange(ymaps):
for x in irange(xmaps):
if k >= nmaps:
break
grid.narrow(1, y * height + padding, height - padding)\
.narrow(2, x * width + padding, width - padding)\
.copy_(tensor[k])
k = k + 1
return grid
def save_image(tensor, fp, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0, format=None):
"""Save a given Tensor into an image file.
Args:
tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
saves the tensor as a grid of images by calling ``make_grid``.
fp (string or file object): A filename or a file object
format(Optional): If omitted, the format to use is determined from the filename extension.
If a file object was used instead of a filename, this parameter should always be used.
**kwargs: Other arguments are documented in ``make_grid``.
"""
from PIL import Image
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(ndarr)
im.save(fp, format=format)
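# Illustrative usage (not part of the original module). Guarded so that importing this
# file does not run the example; tensor sizes and the output filename are arbitrary.
if __name__ == '__main__':
    example_batch = torch.rand(16, 3, 32, 32)  # 16 random RGB 32x32 images
    example_grid = make_grid(example_batch, nrow=4, padding=2, normalize=True)
    print(example_grid.shape)  # torch.Size([3, 138, 138]): 4x4 tiles of 32px plus padding
    save_image(example_batch, 'example_grid.png', nrow=4, normalize=True)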
|
the-stack_0_26785
|
#1.Counting sort
#2.Radix sort
#3.Bucket sort
import random
#=============================Counting sort===============================
# Generate a random list
def random_int_list(start, stop, length):
start, stop = (int(start), int(stop)) if start <=stop else (int(stop),int(start))
length = int(abs(length)) if length else 0
random_list = []
for i in range(length):
random_list.append(random.randint(start,stop))
return random_list
a = random_int_list(1,10,10)
print('生成的原始序列A的数据为:',a)
#扫描列表,找到最大值和最小值
def scan(arr,Gross,length):
max = arr[0]
min = arr[0]
for i in range(length):
if max == arr[i] or min ==arr[i]:
continue
if max<arr[i]:
max = arr[i]
if min > arr[i]:
min = arr[i]
if Gross == 'max':
return max
if Gross == 'min':
return min
max = scan(a,'max',len(a))
min = scan(a,'min',len(a))
print('Maximum:', max, ' Minimum:', min)
# Build the counting list C, length = max - min + 1
def sort_counting_list(length,arr):
sort_list = [0] * length
for i in range(length):
        # Subtract 1 from the index here because list indices start at 0
sort_list[arr[i]-1] = sort_list[arr[i]-1] + 1
return sort_list
sort_list = sort_counting_list(len(a),a)
print('Counting list:', sort_list)
temp = []
for i in range(len(a)):
if sort_list[i]!=0:
for j in range(sort_list[i]):
            # Add 1 back to the index here to undo the 1 subtracted when building the counting list C
temp.append(i+1)
print('Counting sort: ',temp)
#============================Bucket sort============================
# Partition the original data with a mapping function so the buckets are roughly ordered
# e.g. using f(k)=k/10
b = random_int_list(1,100,10)
for i in range(10):
x = random.random()
if x!=0:
b[i] = b[i] * x
b = [("%.3f" % i) for i in b]
b = list(map(float,b))
print('\nFloat-converted sequence B:', b)
def sort_bucket_list(arr):
    # Create 5 buckets
    bucket_alle = [[],[],[],[],[]]
    # Create the final result list
    erfolg = []
    # Create a list holding the mapping values f(k)=0.1x
    bucket_list = [0.1]*len(arr)
    #print('bucket_list inside the function:', bucket_list)
    # Build the mapping f(k)=0.1x
for i in range(len(arr)):
bucket_list[i] = '%.3f' % (bucket_list[i] * arr[i])
#bucket_list = [('%.3f' % i)for i in bucket_list]
bucket_list = list(map(float,bucket_list))
    # Distribute the values into buckets according to the mapping
for i in range(len(arr)):
if 0<=bucket_list[i]<2:
bucket_alle[0].append(arr[i])
elif 2<=bucket_list[i]<4:
bucket_alle[1].append(arr[i])
elif 4<=bucket_list[i]<6:
bucket_alle[2].append(arr[i])
elif 6<=bucket_list[i]<8:
bucket_alle[3].append(arr[i])
elif 8<=bucket_list[i]:
bucket_alle[4].append(arr[i])
else:
            print('Invalid data!')
    print('After bucketing:', bucket_alle)
for i in range(5):
if bucket_alle[i]!= None:
bucket_alle[i].sort()
for i in range(5):
if bucket_alle[i]!=None:
erfolg.extend(bucket_alle[i])
return erfolg
bucket_list = sort_bucket_list(b)
print('After bucket sort:', bucket_list)
#====================================Radix sort===========================
# Sort by the low digit first, then by the high digit
c = random_int_list(1,99,10)
print('\nGenerated random positive integer sequence C (1-99):', c)
def sort_radix_list(arr):
radix_list = [[],[],[],[],[],[],[],[],[],[]]
arrTemp = []
print(radix_list)
    # Look at the ones digit k and put each value into the bucket at index k of radix_list
for i in range(len(arr)):
radix_list[arr[i]%10].append(arr[i])
    print('Bucketed by ones digit:', radix_list)
    # First in-bucket sort for buckets that contain elements
for i in range(len(arr)):
if radix_list[i]!=None:
radix_list[i].sort()
arrTemp.extend(radix_list[i])
    print('New intermediate sequence after the first in-bucket sort:', arrTemp)
    # Look at the tens digit k and put each value into the bucket at index k of radix_list
    # First re-initialize radix_list
for i in range(len(arrTemp)):
del radix_list[i][:]
for i in range(len(arrTemp)):
radix_list[arrTemp[i]%100//10].append(arrTemp[i])
    print('Bucketed by tens digit:', radix_list)
    # Second in-bucket sort for buckets that contain elements
    # Re-initialize arrTemp
del arrTemp[:]
for i in range(len(arr)):
if radix_list[i]!=None:
radix_list[i].sort()
arrTemp.extend(radix_list[i])
return arrTemp
radix_list = sort_radix_list(c)
print('After radix sort:', radix_list)
|
the-stack_0_26787
|
import face_alignment
import skimage.io
import numpy
from argparse import ArgumentParser
from skimage import img_as_ubyte
from skimage.transform import resize
from tqdm import tqdm
import os
import imageio
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def extract_bbox(frame, fa):
if max(frame.shape[0], frame.shape[1]) > 640:
scale_factor = max(frame.shape[0], frame.shape[1]) / 640.0
frame = resize(frame, (int(frame.shape[0] / scale_factor), int(frame.shape[1] / scale_factor)))
frame = img_as_ubyte(frame)
else:
scale_factor = 1
frame = frame[..., :3]
bboxes = fa.face_detector.detect_from_image(frame[..., ::-1])
if len(bboxes) == 0:
return []
return np.array(bboxes)[:, :-1] * scale_factor
def bb_intersection_over_union(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
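# Worked example (illustrative boxes): for boxA = [0, 0, 10, 10] and boxB = [5, 5, 15, 15]
# the inclusive intersection covers 6 * 6 = 36 pixels and each box covers 121 pixels,
# so bb_intersection_over_union(boxA, boxB) == 36 / (121 + 121 - 36), about 0.17.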
def join(tube_bbox, bbox):
xA = min(tube_bbox[0], bbox[0])
yA = min(tube_bbox[1], bbox[1])
xB = max(tube_bbox[2], bbox[2])
yB = max(tube_bbox[3], bbox[3])
return (xA, yA, xB, yB)
def compute_bbox(start, end, fps, tube_bbox, frame_shape, inp, image_shape, increase_area=0.1):
left, top, right, bot = tube_bbox
width = right - left
height = bot - top
#Computing aspect preserving bbox
width_increase = max(increase_area, ((1 + 2 * increase_area) * height - width) / (2 * width))
height_increase = max(increase_area, ((1 + 2 * increase_area) * width - height) / (2 * height))
left = int(left - width_increase * width)
top = int(top - height_increase * height)
right = int(right + width_increase * width)
bot = int(bot + height_increase * height)
top, bot, left, right = max(0, top), min(bot, frame_shape[0]), max(0, left), min(right, frame_shape[1])
h, w = bot - top, right - left
start = start / fps
end = end / fps
time = end - start
scale = f'{image_shape[0]}:{image_shape[1]}'
return f'ffmpeg -i {inp} -ss {start} -t {time} -filter:v "crop={w}:{h}:{left}:{top}, scale={scale}" crop.mp4'
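# Example of the command string produced above (hypothetical numbers):
#   ffmpeg -i video.mp4 -ss 1.0 -t 4.0 -filter:v "crop=260:260:120:80, scale=256:256" crop.mp4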
def compute_bbox_trajectories(trajectories, fps, frame_shape, args):
commands = []
for i, (bbox, tube_bbox, start, end) in enumerate(trajectories):
if (end - start) > args.min_frames:
command = compute_bbox(start, end, fps, tube_bbox, frame_shape, inp=args.inp, image_shape=args.image_shape, increase_area=args.increase)
commands.append(command)
return commands
def process_video(args):
device = 'cpu' if args.cpu else 'cuda'
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False, device=device)
video = imageio.get_reader(args.inp)
trajectories = []
previous_frame = None
fps = video.get_meta_data()['fps']
commands = []
try:
for i, frame in tqdm(enumerate(video)):
frame_shape = frame.shape
bboxes = extract_bbox(frame, fa)
## For each trajectory check the criterion
not_valid_trajectories = []
valid_trajectories = []
for trajectory in trajectories:
tube_bbox = trajectory[0]
intersection = 0
for bbox in bboxes:
intersection = max(intersection, bb_intersection_over_union(tube_bbox, bbox))
if intersection > args.iou_with_initial:
valid_trajectories.append(trajectory)
else:
not_valid_trajectories.append(trajectory)
commands += compute_bbox_trajectories(not_valid_trajectories, fps, frame_shape, args)
trajectories = valid_trajectories
## Assign bbox to trajectories, create new trajectories
for bbox in bboxes:
intersection = 0
current_trajectory = None
for trajectory in trajectories:
tube_bbox = trajectory[0]
current_intersection = bb_intersection_over_union(tube_bbox, bbox)
if intersection < current_intersection and current_intersection > args.iou_with_initial:
intersection = bb_intersection_over_union(tube_bbox, bbox)
current_trajectory = trajectory
## Create new trajectory
if current_trajectory is None:
trajectories.append([bbox, bbox, i, i])
else:
current_trajectory[3] = i
current_trajectory[1] = join(current_trajectory[1], bbox)
except IndexError as e:
raise (e)
commands += compute_bbox_trajectories(trajectories, fps, frame_shape, args)
return commands
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--image_shape", default=(256, 256), type=lambda x: tuple(map(int, x.split(','))),
help="Image shape")
parser.add_argument("--increase", default=0.1, type=float, help='Increase bbox by this amount')
parser.add_argument("--iou_with_initial", type=float, default=0.25, help="The minimal allowed iou with inital bbox")
parser.add_argument("--inp", required=True, help='Input image or video')
parser.add_argument("--min_frames", type=int, default=150, help='Minimum number of frames')
parser.add_argument("--cpu", dest="cpu", action="store_true", help="cpu mode.")
args = parser.parse_args()
commands = process_video(args)
for command in commands:
print (command)
|
the-stack_0_26788
|
from clize import ArgumentError, Parameter, run
def echo(*text:Parameter.REQUIRED,
prefix:'p'='', suffix:'s'='', reverse:'r'=False, repeat:'n'=1):
"""Echoes text back
:param text: The text to echo back
:param reverse: Reverse text before processing
:param repeat: Amount of times to repeat text
:param prefix: Prepend this to each line in word
:param suffix: Append this to each line in word
"""
text = ' '.join(text)
if 'spam' in text:
raise ArgumentError("I don't want any spam!")
if reverse:
text = text[::-1]
text = text * repeat
if prefix or suffix:
return '\n'.join(prefix + line + suffix
for line in text.split('\n'))
return text
def version():
"""Show the version"""
return 'echo version 0.2'
if __name__ == '__main__':
run(echo, alt=version)
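# Example invocations (assuming this file is saved as echo.py and clize's default
# option handling; expected output shown as comments):
#   $ python echo.py hello world --repeat=2 --prefix='> '
#   > hello worldhello world
#   $ python echo.py --version
#   echo version 0.2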
|
the-stack_0_26789
|
import os
from os.path import dirname, join
import cPickle
import numpy as np
import sklearn
import sklearn.linear_model
from sklearn.model_selection import train_test_split
import lmdb
import sys
sys.path.append(dirname(os.path.realpath(__file__))+"/../../../python/")
import caffe
def unpickle(file):
""" unpickle the data """
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def shuffle_data(data, labels):
data, _, labels, _ = sklearn.model_selection.train_test_split(data, labels, test_size=0.0, random_state=42)
return data, labels
def shuffle_data_random(data, labels):
data, _, labels, _ = sklearn.model_selection.train_test_split(data, labels, test_size=0.0)
return data, labels
def load_data(train_file):
""" load the train and test data"""
d = unpickle(train_file)
data = d['data']
#coarse_labels = d['coarse_labels']
fine_labels = d['fine_labels']
length = len(d['fine_labels'])
data, labels = shuffle_data(
data,
#np.array(zip(coarse_labels, fine_labels))
np.array(fine_labels)
)
#coarse_labels, fine_labels = zip(*labels.tolist())
return (
data.reshape(length, 3, 32, 32),
#np.array(coarse_labels),
labels
)
def load_data_random(train_file):
""" load the train and test data"""
d = unpickle(train_file)
data = d['data']
#coarse_labels = d['coarse_labels']
fine_labels = d['fine_labels']
length = len(d['fine_labels'])
data, labels = shuffle_data_random(
data,
#np.array(zip(coarse_labels, fine_labels))
np.array(fine_labels)
)
#coarse_labels, fine_labels = zip(*labels.tolist())
return (
data.reshape(length, 3, 32, 32),
#np.array(coarse_labels),
labels
)
if __name__=='__main__':
cifar_python_directory = dirname(os.path.realpath(__file__))+"/../../../data/cifar100/"
#meta=unpickle(os.path.join(cifar_python_directory, 'meta'))
#fine_label_names=meta['fine_label_names']
#print(fine_label_names)
print("Converting...")
cifar_caffe_directory = os.path.abspath("cifar100_train_lmdb")
if not os.path.exists(cifar_caffe_directory):
X, y_f = load_data_random(os.path.join(cifar_python_directory, 'train'))
Xt, yt_f = load_data(os.path.join(cifar_python_directory, 'test'))
print("Data is fully loaded, now truly convertung.")
env = lmdb.open(cifar_caffe_directory, map_size=50000*1000*5)
txn = env.begin(write=True)
count = 0
for i in range(X.shape[0]):
datum = caffe.io.array_to_datum(X[i], y_f[i])
str_id = '{:08}'.format(count)
txn.put(str_id, datum.SerializeToString())
count+=1
if count%1000==0:
print('already handled with {} pictures'.format(count))
txn.commit()
txn = env.begin(write=True)
txn.commit()
env.close()
env = lmdb.open('cifar100_test_lmdb', map_size=10000*1000*5)
txn = env.begin(write=True)
count = 0
for i in range(Xt.shape[0]):
datum = caffe.io.array_to_datum(Xt[i], yt_f[i])
str_id = '{:08}'.format(count)
txn.put(str_id, datum.SerializeToString())
count+=1
if count%1000==0:
print('already handled with {} pictures'.format(count))
txn.commit()
txn = env.begin(write=True)
txn.commit()
env.close()
else:
print("Conversion was already done. Did not convert twice.")
|
the-stack_0_26791
|
""" Python 'utf-8-sig' Codec
This work similar to UTF-8 with the following changes:
* On encoding/writing a UTF-8 encoded BOM will be prepended/written as the
first three bytes.
* On decoding/reading if the first three bytes are a UTF-8 encoded BOM, these
bytes will be skipped.
"""
import codecs
### Codec APIs
def encode(input, errors='strict'):
return (codecs.BOM_UTF8 + codecs.utf_8_encode(input, errors)[0],
len(input))
def decode(input, errors='strict'):
prefix = 0
if input[:3] == codecs.BOM_UTF8:
input = input[3:]
prefix = 3
(output, consumed) = codecs.utf_8_decode(input, errors, True)
return (output, consumed+prefix)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.first = 1
def encode(self, input, final=False):
if self.first:
self.first = 0
return codecs.BOM_UTF8 + \
codecs.utf_8_encode(input, self.errors)[0]
else:
return codecs.utf_8_encode(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.first = 1
def getstate(self):
return self.first
def setstate(self, state):
self.first = state
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.first = 1
def _buffer_decode(self, input, errors, final):
if self.first:
if len(input) < 3:
if codecs.BOM_UTF8.startswith(input):
# not enough data to decide if this really is a BOM
# => try again on the next call
return ("", 0)
else:
self.first = 0
else:
self.first = 0
if input[:3] == codecs.BOM_UTF8:
(output, consumed) = \
codecs.utf_8_decode(input[3:], errors, final)
return (output, consumed+3)
return codecs.utf_8_decode(input, errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.first = 1
def getstate(self):
state = codecs.BufferedIncrementalDecoder.getstate(self)
# state[1] must be 0 here, as it isn't passed along to the caller
return (state[0], self.first)
def setstate(self, state):
# state[1] will be ignored by BufferedIncrementalDecoder.setstate()
codecs.BufferedIncrementalDecoder.setstate(self, state)
self.first = state[1]
class StreamWriter(codecs.StreamWriter):
def reset(self):
codecs.StreamWriter.reset(self)
try:
del self.encode
except AttributeError:
pass
def encode(self, input, errors='strict'):
self.encode = codecs.utf_8_encode
return encode(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
if len(input) < 3:
if codecs.BOM_UTF8.startswith(input):
# not enough data to decide if this is a BOM
# => try again on the next call
return ("", 0)
elif input[:3] == codecs.BOM_UTF8:
self.decode = codecs.utf_8_decode
(output, consumed) = codecs.utf_8_decode(input[3:],errors)
return (output, consumed+3)
# (else) no BOM present
self.decode = codecs.utf_8_decode
return codecs.utf_8_decode(input, errors)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-8-sig',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
the-stack_0_26792
|
"""System test for decision.py"""
import sys
sys.path.insert(0, '..\\decision')
from decision import Node, Result
import dials
def test_1():
node1 = Node("node1")
node1.set_dial(0.5)
node1.activate(1.0)
assert node1.total_input == 1.0
def test_2():
node1 = Node("node1")
node2 = Node("node2")
node1.attach_sub_node(node2, dials.duo_dial_low)
node1.set_dial(0.5)
node1.activate(0.5)
assert node2.total_input == 0.25
|
the-stack_0_26793
|
# -*- coding: utf-8 -*-
"""
Created on 15/05/2020
@author: yhagos
"""
import pandas as pd
import os
import numpy as np
import itertools
from scipy.spatial.distance import cdist
import multiprocessing as mp
pd.options.mode.chained_assignment = None
class IdentifyMarkersCoExpression:
def __init__(self, combined_cell_pos_dir, patient_id, output_dir, threshold, coexpression_proteins, num_processes=1):
self.combined_cell_pos_dir = combined_cell_pos_dir
self.patient_id = patient_id
self.output_dir = output_dir
self.threshold = threshold
self.coexpression_proteins = coexpression_proteins
self.num_processes = num_processes
def identify_co_expressing_cells(self, file_names_list, process_num):
# create output file names and check if they exist in the path
save_dir = os.path.join(self.output_dir, self.patient_id)
os.makedirs(save_dir, exist_ok=True)
for n_, file_name in enumerate(file_names_list):
print('Process:{}, Patient Id:{}, File name:{}, {}/{}'.format(process_num + 1, self.patient_id, file_name,
n_ + 1, len(file_names_list)))
msi_name = os.path.splitext(file_name)[0]
output_csv = os.path.join(save_dir, msi_name + '_co_exp.csv')
# if os.path.isfile(output_csv) :
# print('{} already exists'.format(output_csv))
# continue
cell_data_df = pd.read_csv(os.path.join(self.combined_cell_pos_dir, self.patient_id, file_name))
col_names = cell_data_df.columns
cell_data_df_copy = cell_data_df.copy()
cell_phenotype_data = pd.DataFrame(columns=['X', 'Y', 'CellPhenotype'])
for co_exp in self.coexpression_proteins:
overlap_df = pd.DataFrame(columns=col_names)
pos_markers, neg_markers = self.split_markers_status(co_exp)
# coexp_protein_list = co_exp.split('+')
coexp_protein_list = pos_markers
if pos_markers.__len__() == 0:
raise Exception(f"incorrect cell phenotype input:{co_exp}. There should be at least one +ve marker.")
elif pos_markers.__len__() == 1:
overlap_df = cell_data_df.loc[cell_data_df['Component'] == pos_markers[0], ['X', 'Y']].reset_index()
else:
# empty index
co_expression_available = True
coexp_markers_index_database = {f"{protein}": [] for protein in coexp_protein_list}
protein_pairs = list(itertools.combinations(coexp_protein_list, 2))
for protein_pair in protein_pairs:
protein_1_data = cell_data_df.loc[cell_data_df['Component'] == protein_pair[0]].reset_index()
protein_1_data = protein_1_data.rename(columns={'index': 'INDEX_'})
# for more than 2 markers expression if there is data from previous computation; consider it
if coexp_markers_index_database[protein_pair[0]].__len__() != 0:
protein_1_data = protein_1_data.loc[protein_1_data['INDEX_'].isin(coexp_markers_index_database[protein_pair[0]]), :]
else:
pass
# protein 2 data frame
protein_2_data = cell_data_df.loc[cell_data_df['Component'] == protein_pair[1]].reset_index()
protein_2_data = protein_2_data.rename(columns={'index': 'INDEX_'})
if coexp_markers_index_database[protein_pair[1]].__len__() != 0:
protein_2_data = protein_2_data.loc[protein_2_data['INDEX_'].isin(coexp_markers_index_database[protein_pair[1]]), :]
else:
pass
overlap_index_input1, overlap_index_input2 = self.get_co_exp_cells_detail(protein_1_data, protein_2_data)
if overlap_index_input1.__len__() == 0:
co_expression_available = False
break
indexs_dict = dict()
indexs_dict[protein_pair[0]] = overlap_index_input1
indexs_dict[protein_pair[1]] = overlap_index_input2
coexp_markers_index_database = self.update_coexpression_database(coexp_markers_index_database, indexs_dict)
# update which is overlapping and not
if co_expression_available:
overlapping_indices = self.get_index_co_expressing_markers_position(coexp_markers_index_database)
cell_data_df_copy.loc[overlapping_indices, 'Component'] = 'co_exp'
# get overlap data
overlap_df = self.get_overlap_data(coexp_database=coexp_markers_index_database, data=cell_data_df_copy.copy())
# overlap_df['CellPhenotype'] = co_exp
# overlap_df_all = pd.concat([overlap_df_all, overlap_df], ignore_index=True, axis=0, sort=False)
else:
pass
if overlap_df.__len__() == 0: # if there are no expressing the +ve markers --> go to next step
continue
else:
# check for the -ve markers
# protein 2 data frame
# df = overlap_df.copy()
for neg_marker in neg_markers:
neg_marker_data = cell_data_df.loc[cell_data_df['Component'] == neg_marker, :].reset_index()
neg_marker_data = neg_marker_data.rename(columns={'index': 'INDEX_'})
overlap_df = overlap_df.reset_index().rename(columns={'index': 'INDEX_'})
overlap_index_input1, _ = self.get_co_exp_cells_detail(overlap_df.copy(),
neg_marker_data)
if overlap_index_input1.__len__() == 0:
continue
else:
# remove these cells
overlap_df = overlap_df.loc[~overlap_df['INDEX_'].isin(overlap_index_input1), :]
overlap_df.drop(columns=['INDEX_'], inplace=True)
if overlap_df.__len__() == 0:
break
overlap_df['CellPhenotype'] = co_exp
cell_phenotype_data = pd.concat([cell_phenotype_data, overlap_df[['X', 'Y', 'CellPhenotype']]],
axis=0, ignore_index=True)
cell_phenotype_data[['X', 'Y']] = cell_phenotype_data[['X', 'Y']].round().astype('int32')
cell_phenotype_data.to_csv(output_csv, index=False)
# cell_data_df_copy.drop(columns=['Class'], inplace=True)
# overlap_df_all.drop(columns=['Class'], inplace=True)
# # drop all cells co-expressing different markers from cell_data_df_copy
# # cell_data_df_copy.drop(cell_data_df_copy.index[cell_data_df_copy['Component'] == 'co_exp'], inplace=True)
# non_overlap_df_data = cell_data_df_copy.loc[cell_data_df_copy['Component'] != 'co_exp', :]
# # concatenate single marker expressing cells and co-expressing cells
# # combined_df_all = pd.concat([overlap_df_all, cell_data_df_copy], ignore_index=True, axis=0, sort=False)
# combined_df_all = pd.concat([overlap_df_all, non_overlap_df_data], ignore_index=True, axis=0, sort=False)
# combined_df_all.to_csv(output_csv, index=False)
def split_markers_status(self, cell_phenotype):
pos_status_markers, neg_status_markers = [], []
for marker_status in cell_phenotype.split('/'):
if marker_status.endswith("+"):
pos_status_markers.append(marker_status[:-1])
elif marker_status.endswith("-"):
neg_status_markers.append(marker_status[:-1])
else:
raise Exception(f"wrong input found {marker_status}")
return pos_status_markers, neg_status_markers
def get_overlap_data(self, coexp_database: dict, data: pd.DataFrame) -> pd.DataFrame:
df_overlap = pd.DataFrame()
for protein, index_values in coexp_database.items():
df = data.iloc[index_values, :].reset_index(drop=True)
if df_overlap.__len__() == 0:
df_overlap = df
else:
# summation
df_overlap[['X', 'Y']] = df_overlap[['X', 'Y']] + df[['X', 'Y']]
# average
df_overlap[['X', 'Y']] = df_overlap[['X', 'Y']] // coexp_database.__len__()
return df_overlap
def get_index_co_expressing_markers_position(self, coexp_database: dict) -> list:
index_list = []
for _, val in coexp_database.items():
index_list += val
return index_list
def update_coexpression_database(self, database, current_computation):
updated_database = database.copy()
if self.__database_empty(database):
for protein, values in current_computation.items():
updated_database[protein] = current_computation[protein]
else:
# do the update using the non-empty field
for protein, values in current_computation.items():
if database[protein].__len__() != 0:
# get index of common values
common_values_index = self.__get_common_values_index(values1=database[protein],
values2=current_computation[protein])
# update dictionary
updated_database = self.__do_updates(database, common_values_index)
break
            # the loop above only handles one protein, so make sure every entry is updated here
for protein, values in current_computation.items():
updated_database[protein] = values
return updated_database
def __do_updates(self, database: dict, index_values_included: list) -> dict:
new_db = dict()
for protein, values in database.items():
if values.__len__() != 0:
new_db[protein] = [values[index_val] for index_val in index_values_included]
else:
new_db[protein] = []
return new_db
def __database_empty(self, database):
vals = []
for _, val in database.items():
vals += val
return vals.__len__() == 0
def __get_common_values_index(self, values1: list, values2: list) -> list:
common_values_index = []
for val in values2:
common_values_index.append(values1.index(val))
return common_values_index
def get_co_exp_cells_detail(self, df1, df2):
        # if either df1 or df2 is empty
if len(df1) == 0 or len(df2) == 0:
return [], []
else:
# compute euclidean distance between the cell pos
euclidean_dist = cdist(df1[['X', 'Y']].to_numpy(), df2[['X', 'Y']].to_numpy(), metric='euclidean')
            # for each cell in df1, find the index of the nearest cell in df2
arg_min_dist = np.argmin(euclidean_dist, axis=1)
is_argmin = np.full(euclidean_dist.shape, fill_value=False, dtype=bool)
is_argmin[(np.array(np.arange(euclidean_dist.shape[0])), np.array(arg_min_dist))] = True
# distance and threshold
is_overlap = euclidean_dist <= self.threshold
# masking: identify overlapping expression level
is_overlap = np.logical_and(is_overlap, is_argmin)
df1_index, df2_index = np.where(is_overlap)
# THESE ARE INDICES IN THE ORIGINAL CSV FILES
overlap_index_input1 = df1.iloc[df1_index, :]['INDEX_'].to_list()
overlap_index_input2 = df2.iloc[df2_index, :]['INDEX_'].to_list()
# return overlap_df, overlap_pos_dict
return overlap_index_input1, overlap_index_input2
def run_co_exp_analysis(self):
file_lists = os.listdir(os.path.join(self.combined_cell_pos_dir, self.patient_id))
if self.num_processes > 1:
n = len(file_lists)
            if n < self.num_processes:
                # more worker processes than files: cap at the number of files
                self.num_processes = n
num_elem_per_process = int(np.ceil(n / self.num_processes))
file_names_list_list = []
for i in range(self.num_processes):
start_ = i * num_elem_per_process
x = file_lists[start_: start_ + num_elem_per_process]
file_names_list_list.append(x)
print('{} processes created.'.format(self.num_processes))
# create list of processes
processes = [
mp.Process(target=self.identify_co_expressing_cells,
args=(file_names_list_list[process_num],
process_num)) for process_num in range(self.num_processes)]
print('processes created')
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
print('All Processes finished!!!')
else:
self.identify_co_expressing_cells(file_lists, 0)
if __name__ == '__main__':
pass
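    # Hedged usage sketch (not from the original script; the directory names,
    # patient id and marker strings below are illustrative assumptions). The
    # class expects per-patient CSVs containing 'X', 'Y' and 'Component' columns,
    # and phenotype strings made of '+'/'-' suffixed markers joined by '/':
    #
    #     analysis = IdentifyMarkersCoExpression(
    #         combined_cell_pos_dir='cell_positions',      # hypothetical input dir
    #         patient_id='patient_001',                    # hypothetical patient id
    #         output_dir='co_expression_output',
    #         threshold=10,                                # max pixel distance between matched cells
    #         coexpression_proteins=['CD8+/PD1+', 'CD8+/FOXP3-'],
    #         num_processes=2,
    #     )
    #     analysis.run_co_exp_analysis()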
|
the-stack_0_26797
|
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from twisted.internet import defer
import synapse.util.logcontext
from synapse.api.errors import CodeMessageException
logger = logging.getLogger(__name__)
class NotRetryingDestination(Exception):
def __init__(self, retry_last_ts, retry_interval, destination):
"""Raised by the limiter (and federation client) to indicate that we are
are deliberately not attempting to contact a given server.
Args:
retry_last_ts (int): the unix ts in milliseconds of our last attempt
to contact the server. 0 indicates that the last attempt was
successful or that we've never actually attempted to connect.
retry_interval (int): the time in milliseconds to wait until the next
attempt.
destination (str): the domain in question
"""
msg = "Not retrying server %s." % (destination,)
super(NotRetryingDestination, self).__init__(msg)
self.retry_last_ts = retry_last_ts
self.retry_interval = retry_interval
self.destination = destination
@defer.inlineCallbacks
def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs):
"""For a given destination check if we have previously failed to
send a request there and are waiting before retrying the destination.
If we are not ready to retry the destination, this will raise a
NotRetryingDestination exception. Otherwise, will return a Context Manager
that will mark the destination as down if an exception is thrown (excluding
CodeMessageException with code < 500)
Args:
destination (str): name of homeserver
clock (synapse.util.clock): timing source
store (synapse.storage.transactions.TransactionStore): datastore
ignore_backoff (bool): true to ignore the historical backoff data and
try the request anyway. We will still reset the retry_interval on success.
Example usage:
try:
limiter = yield get_retry_limiter(destination, clock, store)
with limiter:
response = yield do_request()
except NotRetryingDestination:
# We aren't ready to retry that destination.
raise
"""
retry_last_ts, retry_interval = (0, 0)
retry_timings = yield store.get_destination_retry_timings(destination)
if retry_timings:
retry_last_ts, retry_interval = (
retry_timings["retry_last_ts"],
retry_timings["retry_interval"],
)
now = int(clock.time_msec())
if not ignore_backoff and retry_last_ts + retry_interval > now:
raise NotRetryingDestination(
retry_last_ts=retry_last_ts,
retry_interval=retry_interval,
destination=destination,
)
# if we are ignoring the backoff data, we should also not increment the backoff
# when we get another failure - otherwise a server can very quickly reach the
# maximum backoff even though it might only have been down briefly
backoff_on_failure = not ignore_backoff
defer.returnValue(
RetryDestinationLimiter(
destination,
clock,
store,
retry_interval,
backoff_on_failure=backoff_on_failure,
**kwargs
)
)
class RetryDestinationLimiter(object):
def __init__(
self,
destination,
clock,
store,
retry_interval,
min_retry_interval=10 * 60 * 1000,
max_retry_interval=24 * 60 * 60 * 1000,
multiplier_retry_interval=5,
backoff_on_404=False,
backoff_on_failure=True,
):
"""Marks the destination as "down" if an exception is thrown in the
context, except for CodeMessageException with code < 500.
If no exception is raised, marks the destination as "up".
Args:
destination (str)
clock (Clock)
store (DataStore)
retry_interval (int): The next retry interval taken from the
database in milliseconds, or zero if the last request was
successful.
min_retry_interval (int): The minimum retry interval to use after
a failed request, in milliseconds.
max_retry_interval (int): The maximum retry interval to use after
a failed request, in milliseconds.
multiplier_retry_interval (int): The multiplier to use to increase
the retry interval after a failed request.
backoff_on_404 (bool): Back off if we get a 404
backoff_on_failure (bool): set to False if we should not increase the
retry interval on a failure.
"""
self.clock = clock
self.store = store
self.destination = destination
self.retry_interval = retry_interval
self.min_retry_interval = min_retry_interval
self.max_retry_interval = max_retry_interval
self.multiplier_retry_interval = multiplier_retry_interval
self.backoff_on_404 = backoff_on_404
self.backoff_on_failure = backoff_on_failure
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
valid_err_code = False
if exc_type is None:
valid_err_code = True
elif not issubclass(exc_type, Exception):
# avoid treating exceptions which don't derive from Exception as
# failures; this is mostly so as not to catch defer._DefGen.
valid_err_code = True
elif issubclass(exc_type, CodeMessageException):
# Some error codes are perfectly fine for some APIs, whereas other
            # APIs may expect to never receive e.g. a 404. It's important to
# handle 404 as some remote servers will return a 404 when the HS
# has been decommissioned.
# If we get a 401, then we should probably back off since they
# won't accept our requests for at least a while.
            # 429 is us being aggressively rate limited, so let's rate limit
# ourselves.
if exc_val.code == 404 and self.backoff_on_404:
valid_err_code = False
elif exc_val.code in (401, 429):
valid_err_code = False
elif exc_val.code < 500:
valid_err_code = True
else:
valid_err_code = False
if valid_err_code:
# We connected successfully.
if not self.retry_interval:
return
logger.debug(
"Connection to %s was successful; clearing backoff", self.destination
)
retry_last_ts = 0
self.retry_interval = 0
elif not self.backoff_on_failure:
return
else:
# We couldn't connect.
if self.retry_interval:
self.retry_interval *= self.multiplier_retry_interval
                # apply random jitter; cast to int only after multiplying, since
                # int(random.uniform(0.8, 1.4)) would collapse to 0 or 1
                self.retry_interval = int(self.retry_interval * random.uniform(0.8, 1.4))
if self.retry_interval >= self.max_retry_interval:
self.retry_interval = self.max_retry_interval
else:
self.retry_interval = self.min_retry_interval
logger.info(
"Connection to %s was unsuccessful (%s(%s)); backoff now %i",
self.destination,
exc_type,
exc_val,
self.retry_interval,
)
retry_last_ts = int(self.clock.time_msec())
@defer.inlineCallbacks
def store_retry_timings():
try:
yield self.store.set_destination_retry_timings(
self.destination, retry_last_ts, self.retry_interval
)
except Exception:
logger.exception("Failed to store destination_retry_timings")
# we deliberately do this in the background.
synapse.util.logcontext.run_in_background(store_retry_timings)
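# Worked example (illustrative, using the defaults above): starting from a clean
# slate, consecutive failures grow retry_interval roughly as
#   failure 1: 0 -> min_retry_interval           = 10 minutes
#   failure 2: 10 min * 5 * jitter(0.8..1.4)     ~ 40-70 minutes
#   failure 3: previous * 5 * jitter             ~ 3-8 hours
#   ...
# until it is clamped at max_retry_interval = 24 hours; a success resets it to 0.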
|
the-stack_0_26798
|
import torch
from lib.utils import is_parallel
import numpy as np
np.set_printoptions(threshold=np.inf)
import cv2
from sklearn.cluster import DBSCAN
def build_targets(cfg, predictions, targets, model):
'''
predictions
[16, 3, 32, 32, 85]
[16, 3, 16, 16, 85]
[16, 3, 8, 8, 85]
torch.tensor(predictions[i].shape)[[3, 2, 3, 2]]
[32,32,32,32]
[16,16,16,16]
[8,8,8,8]
targets[3,x,7]
t [index, class, x, y, w, h, head_index]
'''
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
det = model.module.model[model.module.detector_index] if is_parallel(model) \
else model.model[model.detector_index] # Detect() module
# print(type(model))
# det = model.model[model.detector_index]
# print(type(det))
na, nt = det.na, targets.shape[0] # number of anchors=3, targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
g = 0.5 # bias
off = torch.tensor([[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
], device=targets.device).float() * g # offsets
for i in range(det.nl):
anchors = det.anchors[i] #[3,2]
gain[2:6] = torch.tensor(predictions[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < cfg.TRAIN.ANCHOR_THRESHOLD # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
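# Illustrative note (derived from the code above, not from the original file):
# for det.nl detection layers, build_targets returns four lists of length det.nl;
# for layer i with n matched targets,
#   tcls[i]    -> (n,)   class ids
#   tbox[i]    -> (n, 4) regression targets: (x, y) offsets within the grid cell plus (w, h) in grid units
#   indices[i] -> tuple of (image, anchor, grid_j, grid_i) index tensors
#   anch[i]    -> (n, 2) matched anchor sizes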
def morphological_process(image, kernel_size=5, func_type=cv2.MORPH_CLOSE):
"""
morphological process to fill the hole in the binary segmentation result
:param image:
:param kernel_size:
:return:
"""
if len(image.shape) == 3:
raise ValueError('Binary segmentation result image should be a single channel image')
    if image.dtype != np.uint8:
image = np.array(image, np.uint8)
kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))
    # the close operation fills holes
closing = cv2.morphologyEx(image, func_type, kernel, iterations=1)
return closing
def connect_components_analysis(image):
"""
connect components analysis to remove the small components
:param image:
:return:
"""
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
# print(gray_image.dtype)
return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
def if_y(samples_x):
for sample_x in samples_x:
if len(sample_x):
# if len(sample_x) != (sample_x[-1] - sample_x[0] + 1) or sample_x[-1] == sample_x[0]:
if sample_x[-1] == sample_x[0]:
return False
return True
def fitlane(mask, sel_labels, labels, stats):
H, W = mask.shape
for label_group in sel_labels:
states = [stats[k] for k in label_group]
x, y, w, h, _ = states[0]
# if len(label_group) > 1:
# print('in')
# for m in range(len(label_group)-1):
# labels[labels == label_group[m+1]] = label_group[0]
t = label_group[0]
# samples_y = np.linspace(y, H-1, 30)
# else:
samples_y = np.linspace(y, y+h-1, 30)
samples_x = [np.where(labels[int(sample_y)]==t)[0] for sample_y in samples_y]
if if_y(samples_x):
samples_x = [int(np.mean(sample_x)) if len(sample_x) else -1 for sample_x in samples_x]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_y = samples_y[samples_x != -1]
samples_x = samples_x[samples_x != -1]
func = np.polyfit(samples_y, samples_x, 2)
x_limits = np.polyval(func, H-1)
# if (y_max + h - 1) >= 720:
if x_limits < 0 or x_limits > W:
# if (y_max + h - 1) > 720:
# draw_y = np.linspace(y, 720-1, 720-y)
draw_y = np.linspace(y, y+h-1, h)
else:
# draw_y = np.linspace(y, y+h-1, y+h-y)
draw_y = np.linspace(y, H-1, H-y)
draw_x = np.polyval(func, draw_y)
# draw_y = draw_y[draw_x < W]
# draw_x = draw_x[draw_x < W]
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
else:
# if ( + w - 1) >= 1280:
samples_x = np.linspace(x, W-1, 30)
# else:
# samples_x = np.linspace(x, x_max+w-1, 30)
samples_y = [np.where(labels[:, int(sample_x)]==t)[0] for sample_x in samples_x]
samples_y = [int(np.mean(sample_y)) if len(sample_y) else -1 for sample_y in samples_y]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_x = samples_x[samples_y != -1]
samples_y = samples_y[samples_y != -1]
try:
func = np.polyfit(samples_x, samples_y, 2)
            except Exception:
                # not enough valid points to fit a polynomial for this label group
                continue
# y_limits = np.polyval(func, 0)
# if y_limits > 720 or y_limits < 0:
# if (x + w - 1) >= 1280:
# draw_x = np.linspace(x, 1280-1, 1280-x)
# else:
y_limits = np.polyval(func, 0)
if y_limits >= H or y_limits < 0:
                draw_x = np.linspace(x, x+w-1, w)
else:
y_limits = np.polyval(func, W-1)
if y_limits >= H or y_limits < 0:
                    draw_x = np.linspace(x, x+w-1, w)
# if x+w-1 < 640:
# draw_x = np.linspace(0, x+w-1, w+x-x)
else:
draw_x = np.linspace(x, W-1, W-x)
draw_y = np.polyval(func, draw_x)
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
return mask
def connect_lane(image, shadow_height=0):
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
if shadow_height:
image[:shadow_height] = 0
mask = np.zeros((image.shape[0], image.shape[1]), np.uint8)
num_labels, labels, stats, centers = cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
# ratios = []
selected_label = []
for t in range(1, num_labels, 1):
_, _, _, _, area = stats[t]
if area > 400:
selected_label.append(t)
if len(selected_label) == 0:
return mask
else:
split_labels = [[label,] for label in selected_label]
mask_post = fitlane(mask, split_labels, labels, stats)
return mask_post
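if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file; the synthetic mask below
    # stands in for a real lane-segmentation prediction).
    demo_mask = np.zeros((720, 1280), dtype=np.uint8)
    cv2.line(demo_mask, (300, 719), (600, 100), color=1, thickness=12)  # synthetic "lane"
    demo_mask = morphological_process(demo_mask, kernel_size=5)
    connected = connect_lane(demo_mask, shadow_height=0)
    print(connected.shape, connected.max())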
|
the-stack_0_26799
|
#!/usr/bin/env python3
# This is a simple script that takes in an scurve file produced by
# csvcolumn_to_scurve and produces a png graph of the scurve.
import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
FIELDS = ['N/total', 'New/Old']
def get_data(input_file):
global FIELDS
for row in csv.DictReader(input_file):
yield (float(row[FIELDS[0]]), float(row[FIELDS[1]]))
def main():
p = argparse.ArgumentParser()
p.add_argument('input_csv_file', type=argparse.FileType('r'))
p.add_argument('output_file', type=str)
p.add_argument('-y-axis-num-tick-marks', type=int,
help='The number of y tick marks to use above/below zero.')
p.add_argument('-y-axis-min', type=float,
help='Override the min y axis that we use')
p.add_argument('-y-axis-max', type=float,
                   help='Override the max y axis that we use')
p.add_argument('-title', type=str,
help='Title of the graph')
p.add_argument('-x-axis-title', type=str,
help='The title to use on the x-axis of the graph')
p.add_argument('-y-axis-title', type=str,
                   help='The title to use on the y-axis of the graph')
args = p.parse_args()
data = np.array(list(get_data(args.input_csv_file)))
assert np.all(data >= 0)
x = data[:, 0]
y = data[:, 1]
x_axis_title = args.x_axis_title or FIELDS[0]
y_axis_title = args.y_axis_title or FIELDS[1]
title = args.title or "{} vs {}".format(x_axis_title, y_axis_title)
fig, ax = plt.subplots()
fig.set_size_inches(18.5, 18.5)
fig.suptitle(title, fontsize=20)
ax.set_xlabel(x_axis_title, fontsize=20)
ax.set_ylabel(y_axis_title, fontsize=20)
ax.plot(x, y)
ax.scatter(x, y)
# To get good bounds, we:
#
# 1. Re-center our data at 0 by subtracting 1. This will give us the %
# difference in between new and old (i.e. (new - old)/old)
#
# 2. Then we take the maximum absolute delta from zero and round to a
# multiple of 5 away from zero. Lets call this value limit.
#
# 3. We set [min_y, max_y] = [1.0 - limit, 1.0 + limit]
recentered_data = y - 1.0
max_magnitude = int(np.max(np.abs(recentered_data)) * 100.0)
y_limit = float(((max_magnitude // 5) + 1) * 5) * 0.01
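    # Worked example of the rounding above (illustrative numbers): if the largest
    # |New/Old - 1| is 0.07, then max_magnitude = 7, ((7 // 5) + 1) * 5 = 10,
    # so y_limit = 0.10 and the default y range becomes [0.90, 1.10].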
ax.set_xlim(0.0, 1.0)
y_min = args.y_axis_min or 1.0 - y_limit
y_max = args.y_axis_max or 1.0 + y_limit
assert(y_min <= y_max)
ax.set_ylim(y_min, y_max)
ax.grid(True)
ax.xaxis.set_ticks(np.arange(0.0, 1.0, 0.05))
if args.y_axis_num_tick_marks:
y_delta = y_max - y_min
y_tickmark_frequency = y_delta / float(args.y_axis_num_tick_marks)
ax.yaxis.set_ticks(np.arange(y_min, y_max, y_tickmark_frequency))
plt.savefig(args.output_file)
if __name__ == "__main__":
main()
|
the-stack_0_26800
|
#!/usr/bin/env python3
###########################################################################################
# #
#  Program purpose: Find unique triplets whose three elements give the sum of zero     #
# from an array of integers. #
# Program Author : Happi Yvan <[email protected]> #
# Creation Date : September 4, 2019 #
# #
###########################################################################################
import random
def random_integer_list(list_size=10, start=0, stop=10, step=1):
list_data = []
for x in range(list_size):
list_data.append(random.randrange(start=start, stop=stop, step=step))
return list_data
def find_triplets(list_data=None):
if list_data is None:
return []
triplet_list = []
for x in range(len(list_data)):
for i in range(x+1, len(list_data), 1):
if i + 2 <= len(list_data):
val = int(list_data[x]) + int(list_data[i]) + int(list_data[i + 1])
if val == 0:
temp_data = tuple([list_data[x], list_data[i], list_data[i+1]])
if temp_data not in triplet_list:
triplet_list.append(temp_data)
return triplet_list
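def find_triplets_exhaustive(list_data=None):
    """Hedged alternative sketch (an assumption about intent, not the original
    author's method): find_triplets above only combines each element with the
    consecutive pair (list_data[i], list_data[i + 1]); this variant checks every
    3-element combination of the input."""
    from itertools import combinations
    if not list_data:
        return []
    triplet_list = []
    for triplet in combinations(list_data, 3):
        # keep only zero-sum triplets, skipping value-level duplicates
        if sum(triplet) == 0 and triplet not in triplet_list:
            triplet_list.append(triplet)
    return triplet_list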
if __name__ == "__main__":
random_list = random_integer_list(list_size=100, start=-2, stop=15, step=1)
print(f"\nGenerated list: {random_list}")
triplet_data = find_triplets(random_list)
print(f"\nFound unique triplets: \n{triplet_data}")
|
the-stack_0_26801
|
from __future__ import absolute_import, division, print_function
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributions import constraints
from pyro.distributions.torch import MultivariateNormal
from pyro.distributions.util import sum_leftmost
class OMTMultivariateNormal(MultivariateNormal):
"""Multivariate normal (Gaussian) distribution with OMT gradients w.r.t. both
parameters. Note the gradient computation w.r.t. the Cholesky factor has cost
O(D^3), although the resulting gradient variance is generally expected to be lower.
A distribution over vectors in which all the elements have a joint Gaussian
density.
:param torch.Tensor loc: Mean.
:param torch.Tensor scale_tril: Cholesky of Covariance matrix.
"""
arg_constraints = {"loc": constraints.real, "scale_tril": constraints.lower_triangular}
def __init__(self, loc, scale_tril):
assert(loc.dim() == 1), "OMTMultivariateNormal loc must be 1-dimensional"
assert(scale_tril.dim() == 2), "OMTMultivariateNormal scale_tril must be 2-dimensional"
covariance_matrix = torch.mm(scale_tril, scale_tril.t())
super(OMTMultivariateNormal, self).__init__(loc, covariance_matrix)
self.scale_tril = scale_tril
def rsample(self, sample_shape=torch.Size()):
return _OMTMVNSample.apply(self.loc, self.scale_tril, sample_shape + self.loc.shape)
class _OMTMVNSample(Function):
@staticmethod
def forward(ctx, loc, scale_tril, shape):
white = loc.new_empty(shape).normal_()
z = torch.matmul(white, scale_tril.t())
ctx.save_for_backward(z, white, scale_tril)
return loc + z
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
jitter = 1.0e-8 # do i really need this?
z, epsilon, L = ctx.saved_tensors
dim = L.shape[0]
g = grad_output
loc_grad = sum_leftmost(grad_output, -1)
identity = torch.eye(dim, out=torch.tensor(g.new_empty(dim, dim)))
R_inv = torch.trtrs(identity, L.t(), transpose=False, upper=True)[0]
z_ja = z.unsqueeze(-1)
g_R_inv = torch.matmul(g, R_inv).unsqueeze(-2)
epsilon_jb = epsilon.unsqueeze(-2)
g_ja = g.unsqueeze(-1)
diff_L_ab = 0.5 * sum_leftmost(g_ja * epsilon_jb + g_R_inv * z_ja, -2)
Sigma_inv = torch.mm(R_inv, R_inv.t())
V, D, _ = torch.svd(Sigma_inv + jitter)
D_outer = D.unsqueeze(-1) + D.unsqueeze(0)
expand_tuple = tuple([-1] * (z.dim() - 1) + [dim, dim])
z_tilde = identity * torch.matmul(z, V).unsqueeze(-1).expand(*expand_tuple)
g_tilde = identity * torch.matmul(g, V).unsqueeze(-1).expand(*expand_tuple)
Y = sum_leftmost(torch.matmul(z_tilde, torch.matmul(1.0 / D_outer, g_tilde)), -2)
Y = torch.mm(V, torch.mm(Y, V.t()))
Y = Y + Y.t()
Tr_xi_Y = torch.mm(torch.mm(Sigma_inv, Y), R_inv) - torch.mm(Y, torch.mm(Sigma_inv, R_inv))
diff_L_ab += 0.5 * Tr_xi_Y
L_grad = torch.tril(diff_L_ab)
return loc_grad, L_grad, None
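# Hedged usage sketch (illustrative; the dimensions and values are assumptions):
#
#     loc = torch.zeros(3, requires_grad=True)
#     scale_tril = torch.eye(3, requires_grad=True)
#     d = OMTMultivariateNormal(loc, scale_tril)
#     x = d.rsample(torch.Size([10]))   # shape (10, 3); gradients flow to loc and scale_tril
#     x.sum().backward()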
|
the-stack_0_26803
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from typing import Any, Dict, Optional, TYPE_CHECKING
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.connectors.sqla.models import TableColumn
from superset.models.core import Database
logger = logging.getLogger()
class DruidEngineSpec(BaseEngineSpec): # pylint: disable=abstract-method
"""Engine spec for Druid.io"""
engine = "druid"
engine_name = "Apache Druid"
allows_joins = False
allows_subqueries = True
_time_grain_expressions = {
None: "{col}",
"PT1S": "FLOOR({col} TO SECOND)",
"PT1M": "FLOOR({col} TO MINUTE)",
"PT5M": "TIME_FLOOR({col}, 'PT5M')",
"PT10M": "TIME_FLOOR({col}, 'PT10M')",
"PT15M": "TIME_FLOOR({col}, 'PT15M')",
"PT0.5H": "TIME_FLOOR({col}, 'PT30M')",
"PT1H": "FLOOR({col} TO HOUR)",
"P1D": "FLOOR({col} TO DAY)",
"P1W": "FLOOR({col} TO WEEK)",
"P1M": "FLOOR({col} TO MONTH)",
"P0.25Y": "FLOOR({col} TO QUARTER)",
"P1Y": "FLOOR({col} TO YEAR)",
}
@classmethod
def alter_new_orm_column(cls, orm_col: "TableColumn") -> None:
if orm_col.column_name == "__time":
orm_col.is_dttm = True
@staticmethod
def get_extra_params(database: "Database") -> Dict[str, Any]:
"""
For Druid, the path to a SSL certificate is placed in `connect_args`.
:param database: database instance from which to extract extras
:raises CertificateException: If certificate is not valid/unparseable
"""
try:
extra = json.loads(database.extra or "{}")
except json.JSONDecodeError as ex:
logger.error(ex)
raise ex
if database.server_cert:
engine_params = extra.get("engine_params", {})
connect_args = engine_params.get("connect_args", {})
connect_args["scheme"] = "https"
path = utils.create_ssl_cert_file(database.server_cert)
connect_args["ssl_verify_cert"] = path
engine_params["connect_args"] = connect_args
extra["engine_params"] = engine_params
return extra
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"CAST(TIME_PARSE('{dttm.date().isoformat()}') AS DATE)"
if tt in (utils.TemporalType.DATETIME, utils.TemporalType.TIMESTAMP):
return f"""TIME_PARSE('{dttm.isoformat(timespec="seconds")}')"""
return None
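# Illustrative examples (derived from convert_dttm above; not from the original file):
#
#     from datetime import datetime
#     DruidEngineSpec.convert_dttm("DATE", datetime(2021, 1, 5, 10, 30, 0))
#     # -> "CAST(TIME_PARSE('2021-01-05') AS DATE)"
#     DruidEngineSpec.convert_dttm("TIMESTAMP", datetime(2021, 1, 5, 10, 30, 0))
#     # -> "TIME_PARSE('2021-01-05T10:30:00')"
#     DruidEngineSpec.convert_dttm("VARCHAR", datetime(2021, 1, 5, 10, 30, 0))
#     # -> None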
|
the-stack_0_26805
|
import numpy as np
import pandas_datareader as pdr
import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt
from get_data_api_b import get_pd_histo
start = dt.datetime(2020, 9, 1)
date_in_string = start.strftime("%d %b %Y ")
list_of_tickers = ['BTCUSDT', 'ETHUSDT', 'XTZUSDT', 'SOLUSDT' ]
data = pd.DataFrame()
for tick in list_of_tickers:
dataiso = get_pd_histo(tick, date_in_string)
dataiso = dataiso.set_index('Open_Time')
dataiso = dataiso['Close']
data[tick] = dataiso
portfolio = [.25, .15, .40, .20]
log_return = np.sum(np.log(data/data.shift())*portfolio, axis=1)
# fig, ax = plt.subplots()
# log_return.hist(bins=50, ax=ax)
# plt.show()
return_daily = log_return.mean()
std_daily = log_return.std()
sharpe_ratio = log_return.mean()/log_return.std()
annualized_sharpe_ratio = sharpe_ratio*364**.5
weight = np.random.random(4)
weight /= weight.sum()
log_return2 = np.sum(np.log(data/data.shift())*weight, axis=1)
sharpe_ratio2 = log_return2.mean()/log_return2.std()
asr2 = sharpe_ratio2*364**.5
print('portfolio')
print(portfolio)
print(annualized_sharpe_ratio)
print('randomly')
print(weight)
print(asr2)
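# Illustrative note (assumption: 364 = 52 weeks * 7 days is the intended
# annualization window for a market that trades every day):
#
#     annualized Sharpe = mean(daily log return) / std(daily log return) * sqrt(364)
#
# e.g. a daily Sharpe of 0.05 annualizes to roughly 0.05 * 19.08 ~= 0.95.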
|
the-stack_0_26807
|
# -*- coding: utf-8 -*-
r"""
Coxeter Groups
"""
#*****************************************************************************
# Copyright (C) 2009 Nicolas M. Thiery <nthiery at users.sf.net>
# 2015 Christian Stump <christian.stump at gmail.com
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
# With contributions from Dan Bump, Steve Pon, Qiang Wang, Anne Schilling, Christian Stump, Mark Shimozono
from sage.misc.cachefunc import cached_method, cached_in_parent_method
from sage.misc.lazy_import import LazyImport
from sage.misc.constant_function import ConstantFunction
from sage.misc.misc import attrcall, uniq
from sage.categories.category_singleton import Category_singleton
from sage.categories.enumerated_sets import EnumeratedSets
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.generalized_coxeter_groups import GeneralizedCoxeterGroups
from sage.structure.element import have_same_parent
from sage.misc.flatten import flatten
from copy import copy
class CoxeterGroups(Category_singleton):
r"""
The category of Coxeter groups.
A *Coxeter group* is a group `W` with a distinguished (finite)
family of involutions `(s_i)_{i\in I}`, called the *simple
reflections*, subject to relations of the form `(s_is_j)^{m_{i,j}} = 1`.
`I` is the *index set* of `W` and `|I|` is the *rank* of `W`.
See :Wikipedia:`Coxeter_group` for details.
EXAMPLES::
sage: C = CoxeterGroups(); C
Category of coxeter groups
sage: C.super_categories()
[Category of generalized coxeter groups]
sage: W = C.example(); W
The symmetric group on {0, ..., 3}
sage: W.simple_reflections()
Finite family {0: (1, 0, 2, 3), 1: (0, 2, 1, 3), 2: (0, 1, 3, 2)}
Here are some further examples::
sage: FiniteCoxeterGroups().example()
The 5-th dihedral group of order 10
sage: FiniteWeylGroups().example()
The symmetric group on {0, ..., 3}
sage: WeylGroup(["B", 3])
Weyl Group of type ['B', 3] (as a matrix group acting on the ambient space)
Those will eventually be also in this category::
sage: SymmetricGroup(4)
Symmetric group of order 4! as a permutation group
sage: DihedralGroup(5)
Dihedral group of order 10 as a permutation group
.. TODO:: add a demo of usual computations on Coxeter groups.
.. SEEALSO::
- :mod:`sage.combinat.root_system`
- :class:`WeylGroups`
- :class:`GeneralizedCoxeterGroups`
.. WARNING::
It is assumed that morphisms in this category preserve the
distinguished choice of simple reflections. In particular,
subobjects in this category are parabolic subgroups. In this
sense, this category might be better named ``Coxeter
Systems``. In the long run we might want to have two distinct
categories, one for Coxeter groups (with morphisms being just
group morphisms) and one for Coxeter systems::
sage: CoxeterGroups().is_full_subcategory(Groups())
False
sage: from sage.categories.generalized_coxeter_groups import GeneralizedCoxeterGroups
sage: CoxeterGroups().is_full_subcategory(GeneralizedCoxeterGroups())
True
TESTS::
sage: W = CoxeterGroups().example()
sage: TestSuite(W).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: CoxeterGroups().super_categories()
[Category of generalized coxeter groups]
"""
return [GeneralizedCoxeterGroups()]
def additional_structure(self):
r"""
Return ``None``.
Indeed, all the structure Coxeter groups have in addition to
groups (simple reflections, ...) is already defined in the
super category.
.. SEEALSO:: :meth:`Category.additional_structure`
EXAMPLES::
sage: CoxeterGroups().additional_structure()
"""
return None
Finite = LazyImport('sage.categories.finite_coxeter_groups', 'FiniteCoxeterGroups')
Algebras = LazyImport('sage.categories.coxeter_group_algebras', 'CoxeterGroupAlgebras')
class ParentMethods:
def __iter__(self):
r"""
Returns an iterator over the elements of this Coxeter group.
EXAMPLES::
sage: D5 = FiniteCoxeterGroups().example(5)
sage: sorted(list(D5)) # indirect doctest (but see :meth:`._test_enumerated_set_iter_list`)
[(),
(1,),
(1, 2),
(1, 2, 1),
(1, 2, 1, 2),
(1, 2, 1, 2, 1),
(2,),
(2, 1),
(2, 1, 2),
(2, 1, 2, 1)]
sage: W = WeylGroup(["A",2,1])
sage: g = iter(W)
sage: next(g)
[1 0 0]
[0 1 0]
[0 0 1]
sage: next(g)
[-1 1 1]
[ 0 1 0]
[ 0 0 1]
sage: next(g)
[ 1 0 0]
[ 1 -1 1]
[ 0 0 1]
"""
return iter(self.weak_order_ideal(predicate = ConstantFunction(True)))
def weak_order_ideal(self, predicate, side ="right", category = None):
"""
Returns a weak order ideal defined by a predicate
INPUT:
- ``predicate``: a predicate on the elements of ``self`` defining an
weak order ideal in ``self``
- ``side``: "left" or "right" (default: "right")
OUTPUT: an enumerated set
EXAMPLES::
sage: D6 = FiniteCoxeterGroups().example(5)
sage: I = D6.weak_order_ideal(predicate = lambda w: w.length() <= 3)
sage: I.cardinality()
7
sage: list(I)
[(), (1,), (2,), (1, 2), (2, 1), (1, 2, 1), (2, 1, 2)]
We now consider an infinite Coxeter group::
sage: W = WeylGroup(["A",1,1])
sage: I = W.weak_order_ideal(predicate = lambda w: w.length() <= 2)
sage: list(iter(I))
[
[1 0] [-1 2] [ 1 0] [ 3 -2] [-1 2]
[0 1], [ 0 1], [ 2 -1], [ 2 -1], [-2 3]
]
Even when the result is finite, some features of
:class:`FiniteEnumeratedSets` are not available::
sage: I.cardinality() # todo: not implemented
5
sage: list(I) # todo: not implemented
unless this finiteness is explicitly specified::
sage: I = W.weak_order_ideal(predicate = lambda w: w.length() <= 2,
... category = FiniteEnumeratedSets())
sage: I.cardinality()
5
sage: list(I)
[
[1 0] [-1 2] [ 1 0] [ 3 -2] [-1 2]
[0 1], [ 0 1], [ 2 -1], [ 2 -1], [-2 3]
]
.. rubric:: Background
The weak order is returned as a :class:`SearchForest`.
This is achieved by assigning to each element `u1` of the
ideal a single ancestor `u=u1 s_i`, where `i` is the
smallest descent of `u`.
This allows for iterating through the elements in
roughly Constant Amortized Time and constant memory
(taking the operations and size of the generated objects
as constants).
TESTS:
We iterate over each level (i.e., breadth-first-search in the
search forest), see :trac:`19926`::
sage: W = CoxeterGroup(['A',2])
sage: [x.length() for x in W]
[0, 1, 1, 2, 2, 3]
"""
from sage.combinat.backtrack import SearchForest
def succ(u):
for i in u.descents(positive = True, side = side):
u1 = u.apply_simple_reflection(i, side)
if i == u1.first_descent(side = side) and predicate(u1):
yield u1
return
from sage.categories.finite_coxeter_groups import FiniteCoxeterGroups
default_category = FiniteEnumeratedSets() if self in FiniteCoxeterGroups() else EnumeratedSets()
return SearchForest((self.one(),), succ, algorithm='breadth',
category = default_category.or_subcategory(category))
@cached_method
def coxeter_element(self):
"""
Return a Coxeter element.
The result is the product of the simple reflections, in some order.
.. NOTE::
This implementation is shared with well generated
complex reflection groups. It would be nicer to put it
in some joint super category; however, in the current
state of the art, there is none where it's clear that
this is the right construction for obtaining a Coxeter
element.
In this context, this is an element having a regular
eigenvector (a vector not contained in any reflection
hyperplane of ``self``).
EXAMPLES::
sage: CoxeterGroup(['A', 4]).coxeter_element().reduced_word()
[1, 2, 3, 4]
sage: CoxeterGroup(['B', 4]).coxeter_element().reduced_word()
[1, 2, 3, 4]
sage: CoxeterGroup(['D', 4]).coxeter_element().reduced_word()
[1, 2, 4, 3]
sage: CoxeterGroup(['F', 4]).coxeter_element().reduced_word()
[1, 2, 3, 4]
sage: CoxeterGroup(['E', 8]).coxeter_element().reduced_word()
[1, 3, 2, 4, 5, 6, 7, 8]
sage: CoxeterGroup(['H', 3]).coxeter_element().reduced_word()
[1, 2, 3]
This method is also used for well generated finite complex
reflection groups::
sage: W = ReflectionGroup((1,1,4)) # optional - gap3
sage: W.coxeter_element().reduced_word() # optional - gap3
[1, 2, 3]
sage: W = ReflectionGroup((2,1,4)) # optional - gap3
sage: W.coxeter_element().reduced_word() # optional - gap3
[1, 2, 3, 4]
sage: W = ReflectionGroup((4,1,4)) # optional - gap3
sage: W.coxeter_element().reduced_word() # optional - gap3
[1, 2, 3, 4]
sage: W = ReflectionGroup((4,4,4)) # optional - gap3
sage: W.coxeter_element().reduced_word() # optional - gap3
[1, 2, 3, 4]
TESTS::
sage: WeylGroup(['A', 4]).coxeter_element().reduced_word()
[1, 2, 3, 4]
sage: SymmetricGroup(3).coxeter_element()
(1,3,2)
"""
return self.prod(self.simple_reflections())
@cached_method
def standard_coxeter_elements(self):
r"""
Return all standard Coxeter elements in ``self``.
This is the set of all elements in self obtained from any
product of the simple reflections in ``self``.
.. NOTE::
- ``self`` is assumed to be well-generated.
- This works even beyond real reflection groups, but the conjugacy
class is not unique and we only obtain one such class.
EXAMPLES::
sage: W = ReflectionGroup(4) # optional - gap3
sage: sorted(W.standard_coxeter_elements()) # optional - gap3
[(1,7,6,12,23,20)(2,8,17,24,9,5)(3,16,10,19,15,21)(4,14,11,22,18,13),
(1,10,4,12,21,22)(2,11,19,24,13,3)(5,15,7,17,16,23)(6,18,8,20,14,9)]
"""
if not self.is_irreducible() or not self.is_well_generated():
raise ValueError("this method is available for irreducible, well-generated complex reflection groups")
from sage.combinat.permutation import Permutations
return set(self.from_reduced_word(w) for w in Permutations(self._index_set))
def grassmannian_elements(self, side="right"):
"""
            Return the left or right grassmannian elements of ``self``
as an enumerated set.
INPUT:
- ``side`` -- (default: ``"right"``) ``"left"`` or ``"right"``
EXAMPLES::
sage: S = CoxeterGroups().example()
sage: G = S.grassmannian_elements()
sage: G.cardinality()
12
sage: G.list()
[(0, 1, 2, 3), (1, 0, 2, 3), (0, 2, 1, 3), (0, 1, 3, 2),
(2, 0, 1, 3), (1, 2, 0, 3), (0, 3, 1, 2), (0, 2, 3, 1),
(3, 0, 1, 2), (1, 3, 0, 2), (1, 2, 3, 0), (2, 3, 0, 1)]
sage: sorted(tuple(w.descents()) for w in G)
[(), (0,), (0,), (0,), (1,), (1,), (1,), (1,), (1,), (2,), (2,), (2,)]
sage: G = S.grassmannian_elements(side = "left")
sage: G.cardinality()
12
sage: sorted(tuple(w.descents(side = "left")) for w in G)
[(), (0,), (0,), (0,), (1,), (1,), (1,), (1,), (1,), (2,), (2,), (2,)]
"""
order_side = "left" if side == "right" else "right"
return self.weak_order_ideal(attrcall("is_grassmannian", side=side),
side=order_side)
def _test_reduced_word(self, **options):
"""
            Runs sanity checks on :meth:`CoxeterGroups.ElementMethods.reduced_word` and
:meth:`~sage.categories.complex_reflection_or_generalized_coxeter_groups.ComplexReflectionOrGeneralizedCoxeterGroups.ParentMethods.from_reduced_word`
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: W._test_reduced_word()
"""
tester = self._tester(**options)
s = self.simple_reflections()
for x in tester.some_elements():
red = x.reduced_word()
tester.assertEquals(self.from_reduced_word(red), x)
tester.assertEquals(self.prod((s[i] for i in red)), x)
def simple_projection(self, i, side = 'right', length_increasing = True):
r"""
INPUT:
- ``i`` - an element of the index set of ``self``
Returns the simple projection `\pi_i` (or `\overline\pi_i` if `length_increasing` is False).
See :meth:`.simple_projections` for the options and for
the definition of the simple projections.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: W
The symmetric group on {0, ..., 3}
sage: s = W.simple_reflections()
sage: sigma=W.an_element()
sage: sigma
(1, 2, 3, 0)
sage: u0=W.simple_projection(0)
sage: d0=W.simple_projection(0,length_increasing=False)
sage: sigma.length()
3
sage: pi=sigma*s[0]
sage: pi.length()
4
sage: u0(sigma)
(2, 1, 3, 0)
sage: pi
(2, 1, 3, 0)
sage: u0(pi)
(2, 1, 3, 0)
sage: d0(sigma)
(1, 2, 3, 0)
sage: d0(pi)
(1, 2, 3, 0)
"""
if not (i in self.index_set() or i == 0):
raise ValueError("%s is not 0 and not in the Dynkin node set %s"%(i, self.index_set()))
return lambda x: x.apply_simple_projection(i, side = side, length_increasing = length_increasing)
@cached_method
def simple_projections(self, side = 'right', length_increasing = True):
r"""
Returns the family of simple projections, also known as 0-Hecke or Demazure operators.
INPUT:
- ``self`` - a Coxeter group `W`
- ``side`` - 'left' or 'right' (default: 'right')
- ``length_increasing`` - a boolean (default: True) specifying
whether the operator increases or decreases length
Returns the simple projections of `W`, as a family.
To each simple reflection `s_i` of `W`, corresponds a
*simple projection* `\pi_i` from `W` to `W` defined by:
`\pi_i(w) = w s_i` if `i` is not a descent of `w`
`\pi_i(w) = w` otherwise.
The simple projections `(\pi_i)_{i\in I}` move elements
down the right permutohedron, toward the maximal element.
They satisfy the same braid relations as the simple reflections,
            but are idempotents `\pi_i^2=\pi_i`, not involutions `s_i^2 = 1`. As such,
the simple projections generate the `0`-Hecke monoid.
By symmetry, one can also define the projections
`(\overline\pi_i)_{i\in I}` (when the option ``length_increasing`` is False):
`\overline\pi_i(w) = w s_i` if `i` is a descent of `w`
`\overline\pi_i(w) = w` otherwise.
as well as the analogues acting on the left (when the option ``side`` is 'left').
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: W
The symmetric group on {0, ..., 3}
sage: s = W.simple_reflections()
sage: sigma=W.an_element()
sage: sigma
(1, 2, 3, 0)
sage: pi=W.simple_projections()
sage: pi
Finite family {0: <function <lambda> at ...>, 1: <function <lambda> at ...>, 2: <function <lambda> ...>}
sage: pi[1](sigma)
(1, 3, 2, 0)
sage: W.simple_projection(1)(sigma)
(1, 3, 2, 0)
"""
from sage.sets.family import Family
return Family(self.index_set(), lambda i: self.simple_projection(i, side = side, length_increasing = length_increasing))
def demazure_product(self,Q):
r"""
Returns the Demazure product of the list ``Q`` in ``self``.
INPUT:
- ``Q`` is a list of elements from the index set of ``self``.
This returns the Coxeter group element that represents the composition of 0-Hecke or Demazure operators.
See :meth:`CoxeterGroups.ParentMethods.simple_projections`.
EXAMPLES::
sage: W = WeylGroup(['A',2])
sage: w = W.demazure_product([2,2,1])
sage: w.reduced_word()
[2, 1]
sage: w = W.demazure_product([2,1,2,1,2])
sage: w.reduced_word()
[1, 2, 1]
sage: W = WeylGroup(['B',2])
sage: w = W.demazure_product([2,1,2,1,2])
sage: w.reduced_word()
[2, 1, 2, 1]
"""
return self.one().apply_demazure_product(Q)
def bruhat_interval(self, x, y):
"""
Returns the list of t such that x <= t <= y.
EXAMPLES::
sage: W = WeylGroup("A3", prefix="s")
sage: [s1,s2,s3]=W.simple_reflections()
sage: W.bruhat_interval(s2,s1*s3*s2*s1*s3)
[s1*s2*s3*s2*s1, s2*s3*s2*s1, s3*s1*s2*s1, s1*s2*s3*s1, s1*s2*s3*s2, s3*s2*s1, s2*s3*s1, s2*s3*s2, s1*s2*s1, s3*s1*s2, s1*s2*s3, s2*s1, s3*s2, s2*s3, s1*s2, s2]
sage: W = WeylGroup(['A',2,1], prefix="s")
sage: [s0,s1,s2]=W.simple_reflections()
sage: W.bruhat_interval(1,s0*s1*s2)
[s0*s1*s2, s1*s2, s0*s2, s0*s1, s2, s1, s0, 1]
"""
if x == 1:
x = self.one()
if y == 1:
y = self.one()
if x == y:
return [x]
ret = []
if not x.bruhat_le(y):
return ret
ret.append([y])
while ret[-1] != []:
nextlayer = []
for z in ret[-1]:
for t in z.bruhat_lower_covers():
if t not in nextlayer:
if x.bruhat_le(t):
nextlayer.append(t)
ret.append(nextlayer)
return flatten(ret)
def canonical_representation(self):
r"""
Return the canonical faithful representation of ``self``.
EXAMPLES::
sage: W = WeylGroup("A3")
sage: W.canonical_representation()
Finite Coxeter group over Universal Cyclotomic Field with Coxeter matrix:
[1 3 2]
[3 1 3]
[2 3 1]
"""
from sage.groups.matrix_gps.coxeter_group import CoxeterMatrixGroup
return CoxeterMatrixGroup(self.coxeter_matrix(),
index_set=self.index_set())
def elements_of_length(self, n):
r"""
Return all elements of length `n`.
EXAMPLES::
sage: A = AffinePermutationGroup(['A',2,1])
sage: [len(list(A.elements_of_length(i))) for i in [0..5]]
[1, 3, 6, 9, 12, 15]
sage: W = CoxeterGroup(['H',3])
sage: [len(list(W.elements_of_length(i))) for i in range(4)]
[1, 3, 5, 7]
sage: W = CoxeterGroup(['A',2])
sage: [len(list(W.elements_of_length(i))) for i in range(6)]
[1, 2, 2, 1, 0, 0]
"""
I = self.weak_order_ideal(ConstantFunction(True), side='right')
return I.elements_of_depth_iterator(n)
def random_element_of_length(self, n):
r"""
Return a random element of length ``n`` in ``self``.
Starts at the identity, then chooses an upper cover at random.
Not very uniform: actually constructs a uniformly random
reduced word of length `n`. Thus we most likely get
elements with lots of reduced words!
EXAMPLES::
sage: A = AffinePermutationGroup(['A', 7, 1])
sage: p = A.random_element_of_length(10)
sage: p in A
True
sage: p.length() == 10
True
sage: W = CoxeterGroup(['A', 4])
sage: p = W.random_element_of_length(5)
sage: p in W
True
sage: p.length() == 5
True
"""
from sage.misc.prandom import randint
x = self.one()
for i in xrange(1, n + 1):
antiD = x.descents(positive=True)
rnd = randint(0, len(antiD) - 1)
x = x.apply_simple_reflection_right(antiD[rnd])
return x
# TODO: Groups() should have inverse() call __invert__
# With strong doc stating that this is just a convenience for the user
# and links to ~ / __invert__
# parabolic_subgroup
def _test_simple_projections(self, **options):
"""
Runs sanity checks on :meth:`.simple_projections`
and :meth:`CoxeterGroups.ElementMethods.apply_simple_projection`
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: W._test_simple_projections()
"""
tester = self._tester(**options)
for side in ['left', 'right']:
pi = self.simple_projections(side = side)
opi = self.simple_projections(side = side, length_increasing = False)
for i in self.index_set():
for w in tester.some_elements():
tester.assert_( pi[i](w) == w.apply_simple_projection(i, side = side))
tester.assert_( pi[i](w) == w.apply_simple_projection(i, side = side, length_increasing = True ))
tester.assert_(opi[i](w) == w.apply_simple_projection(i, side = side, length_increasing = False))
tester.assert_( pi[i](w).has_descent(i, side = side))
tester.assert_(not opi[i](w).has_descent(i, side = side))
tester.assertEquals(set([pi[i](w), opi[i](w)]),
set([w, w.apply_simple_reflection(i, side = side)]))
def _test_has_descent(self, **options):
"""
Runs sanity checks on the method
:meth:`CoxeterGroups.ElementMethods.has_descent` of the
elements of self.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: W._test_has_descent()
"""
tester = self._tester(**options)
s = self.simple_reflections()
for i in self.index_set():
tester.assert_(not self.one().has_descent(i))
tester.assert_(not self.one().has_descent(i, side = 'left'))
tester.assert_(not self.one().has_descent(i, side = 'right'))
tester.assert_(self.one().has_descent(i, positive = True))
tester.assert_(self.one().has_descent(i, positive = True, side = 'left'))
tester.assert_(self.one().has_descent(i, positive = True, side = 'right'))
for j in self.index_set():
tester.assertEquals(s[i].has_descent(j, side = 'left' ), i==j)
tester.assertEquals(s[i].has_descent(j, side = 'right'), i==j)
tester.assertEquals(s[i].has_descent(j ), i==j)
tester.assertEquals(s[i].has_descent(j, positive = True, side = 'left' ), i!=j)
tester.assertEquals(s[i].has_descent(j, positive = True, side = 'right'), i!=j)
tester.assertEquals(s[i].has_descent(j, positive = True, ), i!=j)
if i == j:
continue
u = s[i] * s[j]
v = s[j] * s[i]
tester.assert_((s[i]*s[j]).has_descent(i, side = 'left' ))
tester.assert_((s[i]*s[j]).has_descent(j, side = 'right'))
tester.assertEquals((s[i]*s[j]).has_descent(j, side = 'left' ), u == v)
tester.assertEquals((s[i]*s[j]).has_descent(i, side = 'right'), u == v)
def _test_descents(self, **options):
"""
Run sanity checks on the method
:meth:`CoxeterGroups.ElementMethods.descents` of the
elements of ``self``.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: W._test_descents()
"""
tester = self._tester(**options)
s = self.simple_reflections()
tester.assertEqual(len(self.one().descents(side='right')), 0)
tester.assertEqual(len(self.one().descents(side='left')), 0)
for i in self.index_set():
si = s[i]
tester.assertEqual([i], si.descents(side='left'))
tester.assertEqual([i], si.descents(side='right'))
tester.assertNotIn(i, si.descents(positive=True, side='left'))
tester.assertNotIn(i, si.descents(positive=True, side='right'))
class ElementMethods:
def has_descent(self, i, side = 'right', positive=False):
"""
Returns whether i is a (left/right) descent of self.
See :meth:`.descents` for a description of the options.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: s = W.simple_reflections()
sage: w = s[0] * s[1] * s[2]
sage: w.has_descent(2)
True
sage: [ w.has_descent(i) for i in [0,1,2] ]
[False, False, True]
sage: [ w.has_descent(i, side = 'left') for i in [0,1,2] ]
[True, False, False]
sage: [ w.has_descent(i, positive = True) for i in [0,1,2] ]
[True, True, False]
This default implementation delegates the work to
:meth:`.has_left_descent` and :meth:`.has_right_descent`.
"""
if not isinstance(positive, bool):
raise TypeError("%s is not a boolean"%(bool))
if side == 'right':
return self.has_right_descent(i) != positive
if side != 'left':
raise ValueError("%s is neither 'right' nor 'left'"%(side))
return self.has_left_descent(i) != positive
# @abstract_method(optional = True)
def has_right_descent(self, i):
"""
Returns whether ``i`` is a right descent of self.
EXAMPLES::
sage: W = CoxeterGroups().example(); W
The symmetric group on {0, ..., 3}
sage: w = W.an_element(); w
(1, 2, 3, 0)
sage: w.has_right_descent(0)
False
sage: w.has_right_descent(1)
False
sage: w.has_right_descent(2)
True
"""
return (~self).has_left_descent(i)
def has_left_descent(self, i):
"""
Returns whether `i` is a left descent of self.
This default implementation uses that a left descent of
`w` is a right descent of `w^{-1}`.
EXAMPLES::
sage: W = CoxeterGroups().example(); W
The symmetric group on {0, ..., 3}
sage: w = W.an_element(); w
(1, 2, 3, 0)
sage: w.has_left_descent(0)
True
sage: w.has_left_descent(1)
False
sage: w.has_left_descent(2)
False
TESTS::
sage: w.has_left_descent.__module__
'sage.categories.coxeter_groups'
"""
return (~self).has_right_descent(i)
def first_descent(self, side = 'right', index_set=None, positive=False):
"""
Returns the first left (resp. right) descent of self, as
            an element of ``index_set``, or ``None`` if there is none.
See :meth:`.descents` for a description of the options.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: s = W.simple_reflections()
sage: w = s[2]*s[0]
sage: w.first_descent()
0
sage: w = s[0]*s[2]
sage: w.first_descent()
0
sage: w = s[0]*s[1]
sage: w.first_descent()
1
"""
if index_set is None:
index_set = self.parent().index_set()
for i in index_set:
if self.has_descent(i, side = side, positive = positive):
return i
return None
def descents(self, side = 'right', index_set=None, positive=False):
"""
INPUT:
- ``index_set`` - a subset (as a list or iterable) of the nodes of the Dynkin diagram;
(default: all of them)
- ``side`` - 'left' or 'right' (default: 'right')
- ``positive`` - a boolean (default: ``False``)
Returns the descents of self, as a list of elements of the
index_set.
The ``index_set`` option can be used to restrict to the
parabolic subgroup indexed by ``index_set``.
If positive is ``True``, then returns the non-descents
instead
TODO: find a better name for ``positive``: complement? non_descent?
Caveat: the return type may change to some other iterable
(tuple, ...) in the future. Please use keyword arguments
also, as the order of the arguments may change as well.
EXAMPLES::
sage: W=CoxeterGroups().example()
sage: s=W.simple_reflections()
sage: w=s[0]*s[1]
sage: w.descents()
[1]
sage: w=s[0]*s[2]
sage: w.descents()
[0, 2]
TODO: side, index_set, positive
"""
if index_set is None:
index_set=self.parent().index_set()
return [ i for i in index_set if self.has_descent(i, side = side, positive = positive) ]
def is_grassmannian(self, side = "right"):
"""
INPUT:
- ``side`` - "left" or "right" (default: "right")
Tests whether ``self`` is Grassmannian, i.e. it has at
most one descent on the right (resp. on the left).
EXAMPLES::
sage: W = CoxeterGroups().example(); W
The symmetric group on {0, ..., 3}
sage: s = W.simple_reflections()
sage: W.one().is_grassmannian()
True
sage: s[1].is_grassmannian()
True
sage: (s[1]*s[2]).is_grassmannian()
True
sage: (s[0]*s[1]).is_grassmannian()
True
sage: (s[1]*s[2]*s[1]).is_grassmannian()
False
sage: (s[0]*s[2]*s[1]).is_grassmannian(side = "left")
False
sage: (s[0]*s[2]*s[1]).is_grassmannian(side = "right")
True
sage: (s[0]*s[2]*s[1]).is_grassmannian()
True
"""
return len(self.descents(side = side)) <= 1
def reduced_word_reverse_iterator(self):
"""
Return a reverse iterator on a reduced word for ``self``.
EXAMPLES::
sage: W=CoxeterGroups().example()
sage: s = W.simple_reflections()
sage: sigma = s[0]*s[1]*s[2]
sage: rI=sigma.reduced_word_reverse_iterator()
sage: [i for i in rI]
[2, 1, 0]
sage: s[0]*s[1]*s[2]==sigma
True
sage: sigma.length()
3
.. SEEALSO::
:meth:`.reduced_word`
Default implementation: recursively remove the first right
descent until the identity is reached (see :meth:`.first_descent` and
:meth:`~sage.categories.complex_reflection_or_generalized_coxeter_groups.ComplexReflectionOrGeneralizedCoxeterGroups.ElementMethods.apply_simple_reflection`).
"""
while True:
i = self.first_descent()
if i is None:
return
self = self.apply_simple_reflection(i, 'right')
yield i
def reduced_word(self):
r"""
Return a reduced word for ``self``.
This is a word `[i_1,i_2,\ldots,i_k]` of minimal length
such that
`s_{i_1} s_{i_2} \cdots s_{i_k} = \operatorname{self}`,
where the `s_i` are the simple reflections.
EXAMPLES::
sage: W=CoxeterGroups().example()
sage: s=W.simple_reflections()
sage: w=s[0]*s[1]*s[2]
sage: w.reduced_word()
[0, 1, 2]
sage: w=s[0]*s[2]
sage: w.reduced_word()
[2, 0]
.. SEEALSO::
- :meth:`.reduced_words`, :meth:`.reduced_word_reverse_iterator`,
- :meth:`length`, :meth:`reduced_word_graph`
"""
result = list(self.reduced_word_reverse_iterator())
return list(reversed(result))
#def lex_min_reduced_word(w):
# return list(reversed((w.inverse()).reduced_word()))
def support(self):
r"""
Return the support of ``self``, that is the simple reflections that
appear in the reduced expressions of ``self``.
OUTPUT:
The support of ``self`` as a set of integers
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: w = W.from_reduced_word([1,2,1])
sage: w.support()
{1, 2}
"""
return set(self.reduced_word())
def has_full_support(self):
r"""
Return whether ``self`` has full support.
An element is said to have full support if its support contains
all simple reflections.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: w = W.from_reduced_word([1,2,1])
sage: w.has_full_support()
False
sage: w = W.from_reduced_word([1,2,1,0,1])
sage: w.has_full_support()
True
"""
return self.support() == set(self.parent().index_set())
def reduced_words(self):
r"""
Return all reduced words for ``self``.
See :meth:`reduced_word` for the definition of a reduced
word.
EXAMPLES::
sage: W=CoxeterGroups().example()
sage: s=W.simple_reflections()
sage: w=s[0]*s[2]
sage: w.reduced_words()
[[2, 0], [0, 2]]
sage: W=WeylGroup(['E',6])
sage: w=W.from_reduced_word([2,3,4,2])
sage: w.reduced_words()
[[3, 2, 4, 2], [2, 3, 4, 2], [3, 4, 2, 4]]
TODO: the result should be full featured finite enumerated
set (e.g. counting can be done much faster than iterating).
.. SEEALSO::
:meth:`.reduced_word`, :meth:`.reduced_word_reverse_iterator`,
:meth:`length`, :meth:`reduced_word_graph`
"""
descents = self.descents()
if descents == []:
return [[]]
else:
return [ r + [i]
for i in self.descents()
for r in (self.apply_simple_reflection(i)).reduced_words()
]
def reduced_word_graph(self):
r"""
Return the reduced word graph of ``self``.
The reduced word graph of an element `w` in a Coxeter group
is the graph whose vertices are the reduced words for `w`
(see :meth:`reduced_word` for a definition of this term),
and which has an `m`-colored edge between two reduced words
`x` and `y` whenever `x` and `y` differ by exactly one
length-`m` braid move (with `m \geq 2`).
This graph is always connected (a theorem due to Tits) and
has no multiple edges.
EXAMPLES::
sage: W = WeylGroup(['A',3], prefix='s')
sage: w0 = W.long_element()
sage: G = w0.reduced_word_graph()
sage: G.num_verts()
16
sage: len(w0.reduced_words())
16
sage: G.num_edges()
18
sage: len([e for e in G.edges() if e[2] == 2])
10
sage: len([e for e in G.edges() if e[2] == 3])
8
TESTS::
sage: p = Permutation([3,2,4,1])
sage: pp = WeylGroup(['A',3]).from_reduced_word(p.reduced_word())
sage: pp.reduced_word_graph()
Graph on 3 vertices
sage: w1 = W.one()
sage: G = w1.reduced_word_graph()
sage: G.num_verts()
1
sage: G.num_edges()
0
.. SEEALSO::
:meth:`.reduced_words`, :meth:`.reduced_word_reverse_iterator`,
:meth:`length`, :meth:`reduced_word`
"""
R = self.reduced_words()
from sage.graphs.graph import Graph
# Special case for when the graph does not contain any edges
if len(R) == 1:
return Graph({tuple(R[0]): []}, immutable=True)
P = self.parent()
edges = []
for i,x in enumerate(R):
x = tuple(x)
for y in R[i:]:
y = tuple(y)
# Check that the reduced expressions differ by only
# a single braid move
i = 0
while i < len(x) and x[i] == y[i]:
i += 1
if i == len(x):
continue
a, b = x[i], y[i]
m = P.coxeter_matrix()[a,b]
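# Build the two sides of the length-m braid relation starting with a and b
# respectively (abab... vs. baba...); two reduced words get an m-colored
# edge exactly when they differ by this single braid move.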
subword = [a,b] * (m // 2)
subword2 = [b,a] * (m // 2)
if m % 2 != 0:
subword.append(a)
subword2.append(b)
if (x[i:i+m] != tuple(subword)
or y[i:i+m] != tuple(subword2)
or x[i+m:] != y[i+m:]):
continue
edges.append([x, y, m])
G = Graph(edges, immutable=True, format="list_of_edges")
colors = {2: 'blue', 3: 'red', 4: 'green'}
G.set_latex_options(edge_labels=True, color_by_label=lambda x: colors[x])
return G
def length(self):
r"""
Return the length of ``self``.
This is the minimal length of
a product of simple reflections giving ``self``.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: s1 = W.simple_reflection(1)
sage: s2 = W.simple_reflection(2)
sage: s1.length()
1
sage: (s1*s2).length()
2
sage: W = CoxeterGroups().example()
sage: s = W.simple_reflections()
sage: w = s[0]*s[1]*s[0]
sage: w.length()
3
sage: W = CoxeterGroups().example()
sage: sum((x^w.length()) for w in W) - expand(prod(sum(x^i for i in range(j+1)) for j in range(4))) # This is scandalously slow!!!
0
.. SEEALSO::
:meth:`.reduced_word`
.. TODO::
Should use reduced_word_iterator (or reverse_iterator)
"""
return len(self.reduced_word())
def absolute_length(self):
"""
Return the absolute length of ``self``.
The absolute length is the length of the shortest expression
of the element as a product of reflections.
For permutations in the symmetric groups, the absolute
length is the size minus the number of its disjoint
cycles.
.. SEEALSO::
:meth:`absolute_le`
EXAMPLES::
sage: W = WeylGroup(["A", 3])
sage: s = W.simple_reflections()
sage: (s[1]*s[2]*s[3]).absolute_length()
3
sage: W = SymmetricGroup(4)
sage: s = W.simple_reflections()
sage: (s[3]*s[2]*s[1]).absolute_length()
3
"""
M = self.canonical_matrix()
return (M - 1).image().dimension()
def absolute_le(self, other):
r"""
Return whether ``self`` is smaller than ``other`` in the absolute
order.
A general reflection is an element of the form `w s_i w^{-1}`,
where `s_i` is a simple reflection. The absolute order is defined
analogously to the weak order but using general reflections rather
than just simple reflections.
This partial order can be used to define noncrossing partitions
associated with this Coxeter group.
.. SEEALSO::
:meth:`absolute_length`
EXAMPLES::
sage: W = WeylGroup(["A", 3])
sage: s = W.simple_reflections()
sage: w0 = s[1]
sage: w1 = s[1]*s[2]*s[3]
sage: w0.absolute_le(w1)
True
sage: w1.absolute_le(w0)
False
sage: w1.absolute_le(w1)
True
"""
if self == other:
return True
if self.absolute_length() >= other.absolute_length():
return False
return self.absolute_length() + (self.inverse() * other).absolute_length() == other.absolute_length()
def absolute_covers(self):
r"""
Return the list of covers of ``self`` in absolute order.
.. SEEALSO::
:meth:`absolute_length`
EXAMPLES::
sage: W = WeylGroup(["A", 3])
sage: s = W.simple_reflections()
sage: w0 = s[1]
sage: w1 = s[1]*s[2]*s[3]
sage: w0.absolute_covers()
[
[0 0 1 0] [0 1 0 0] [0 0 0 1] [0 1 0 0] [0 1 0 0]
[1 0 0 0] [1 0 0 0] [1 0 0 0] [0 0 1 0] [0 0 0 1]
[0 1 0 0] [0 0 0 1] [0 0 1 0] [1 0 0 0] [0 0 1 0]
[0 0 0 1], [0 0 1 0], [0 1 0 0], [0 0 0 1], [1 0 0 0]
]
"""
W = self.parent()
return [self * t for t in W.reflections()
if self.absolute_length() < (self * t).absolute_length()]
def canonical_matrix(self):
r"""
Return the matrix of ``self`` in the canonical faithful
representation.
This is an `n`-dimensional real faithful essential representation,
where `n` is the number of generators of the Coxeter group.
Note that this is not always the most natural matrix
representation, for instance in type `A_n`.
EXAMPLES::
sage: W = WeylGroup(["A", 3])
sage: s = W.simple_reflections()
sage: (s[1]*s[2]*s[3]).canonical_matrix()
[ 0 0 -1]
[ 1 0 -1]
[ 0 1 -1]
"""
G = self.parent().canonical_representation()
return G.prod(G.simple_reflection(i) for i in self.reduced_word()).matrix()
def coset_representative(self, index_set, side = 'right'):
r"""
INPUT:
- ``index_set`` - a subset (or iterable) of the nodes of the Dynkin diagram
- ``side`` - 'left' or 'right'
Returns the unique shortest element of the Coxeter group
$W$ which is in the same left (resp. right) coset as
``self``, with respect to the parabolic subgroup $W_I$.
EXAMPLES::
sage: W = CoxeterGroups().example(5)
sage: s = W.simple_reflections()
sage: w = s[2]*s[1]*s[3]
sage: w.coset_representative([]).reduced_word()
[2, 3, 1]
sage: w.coset_representative([1]).reduced_word()
[2, 3]
sage: w.coset_representative([1,2]).reduced_word()
[2, 3]
sage: w.coset_representative([1,3] ).reduced_word()
[2]
sage: w.coset_representative([2,3] ).reduced_word()
[2, 1]
sage: w.coset_representative([1,2,3] ).reduced_word()
[]
sage: w.coset_representative([], side = 'left').reduced_word()
[2, 3, 1]
sage: w.coset_representative([1], side = 'left').reduced_word()
[2, 3, 1]
sage: w.coset_representative([1,2], side = 'left').reduced_word()
[3]
sage: w.coset_representative([1,3], side = 'left').reduced_word()
[2, 3, 1]
sage: w.coset_representative([2,3], side = 'left').reduced_word()
[1]
sage: w.coset_representative([1,2,3], side = 'left').reduced_word()
[]
"""
while True:
i = self.first_descent(side = side, index_set = index_set)
if i is None:
return self
self = self.apply_simple_reflection(i, side = side)
def apply_simple_projection(self, i, side = 'right', length_increasing = True):
r"""
INPUT:
- ``i`` - an element of the index set of the Coxeter group
- ``side`` - 'left' or 'right' (default: 'right')
- ``length_increasing`` - a boolean (default: True) specifying
the direction of the projection
Returns the result of the application of the simple
projection `\pi_i` (resp. `\overline\pi_i`) on ``self``.
See :meth:`CoxeterGroups.ParentMethods.simple_projections`
for the definition of the simple projections.
EXAMPLES::
sage: W=CoxeterGroups().example()
sage: w=W.an_element()
sage: w
(1, 2, 3, 0)
sage: w.apply_simple_projection(2)
(1, 2, 3, 0)
sage: w.apply_simple_projection(2, length_increasing=False)
(1, 2, 0, 3)
sage: W = WeylGroup(['C',4],prefix="s")
sage: v = W.from_reduced_word([1,2,3,4,3,1])
sage: v
s1*s2*s3*s4*s3*s1
sage: v.apply_simple_projection(2)
s1*s2*s3*s4*s3*s1*s2
sage: v.apply_simple_projection(2, side='left')
s1*s2*s3*s4*s3*s1
sage: v.apply_simple_projection(1, length_increasing = False)
s1*s2*s3*s4*s3
"""
if self.has_descent(i, side = side, positive = length_increasing):
return self.apply_simple_reflection(i, side=side)
return self
def binary_factorizations(self, predicate = ConstantFunction(True)):
"""
Returns the set of all the factorizations `self = u v` such
that `l(self) = l(u) + l(v)`.
Iterating through this set is Constant Amortized Time
(counting arithmetic operations in the Coxeter group as
constant time) complexity, and memory linear in the length
of `self`.
One can pass as optional argument a predicate `p` such that
`p(u)` implies `p(u')` for any left factor `u` of ``self``
and any left factor `u'` of `u`. Then only the
factorizations `self = uv` such that `p(u)` holds are returned.
EXAMPLES:
We construct the set of all factorizations of the maximal
element of the group::
sage: W = WeylGroup(['A',3])
sage: s = W.simple_reflections()
sage: w0 = W.from_reduced_word([1,2,3,1,2,1])
sage: w0.binary_factorizations().cardinality()
24
The same number of factorizations, by bounded length::
sage: [w0.binary_factorizations(lambda u: u.length() <= l).cardinality() for l in [-1,0,1,2,3,4,5,6]]
[0, 1, 4, 9, 15, 20, 23, 24]
The number of factorizations of the elements just below
the maximal element::
sage: [(s[i]*w0).binary_factorizations().cardinality() for i in [1,2,3]]
[12, 12, 12]
sage: w0.binary_factorizations(lambda u: False).cardinality()
0
TESTS::
sage: w0.binary_factorizations().category()
Category of finite enumerated sets
"""
from sage.combinat.backtrack import SearchForest
W = self.parent()
if not predicate(W.one()):
from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
return FiniteEnumeratedSet([])
s = W.simple_reflections()
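# Nodes of the search forest are pairs (u, v) with self = u*v and
# l(u) + l(v) = l(self); a child moves one left descent of v onto the right
# of u. The first_descent test ensures each factorization is generated once.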
def succ(u_v):
(u, v) = u_v
for i in v.descents(side = 'left'):
u1 = u * s[i]
if i == u1.first_descent() and predicate(u1):
yield (u1, s[i]*v)
return SearchForest(((W.one(), self),), succ, category = FiniteEnumeratedSets())
@cached_in_parent_method
def bruhat_lower_covers(self):
"""
Returns all elements that ``self`` covers in (strong) Bruhat order.
If ``w = self`` has a descent at `i`, then the elements that
`w` covers are exactly `\{ws_i, u_1s_i, u_2s_i,..., u_js_i\}`,
where the `u_k` are elements that `ws_i` covers that also
do not have a descent at `i`.
EXAMPLES::
sage: W = WeylGroup(["A",3])
sage: w = W.from_reduced_word([3,2,3])
sage: print([v.reduced_word() for v in w.bruhat_lower_covers()])
[[3, 2], [2, 3]]
sage: W = WeylGroup(["A",3])
sage: print([v.reduced_word() for v in W.simple_reflection(1).bruhat_lower_covers()])
[[]]
sage: print([v.reduced_word() for v in W.one().bruhat_lower_covers()])
[]
sage: W = WeylGroup(["B",4,1])
sage: w = W.from_reduced_word([0,2])
sage: print([v.reduced_word() for v in w.bruhat_lower_covers()])
[[2], [0]]
We now show how to construct the Bruhat poset::
sage: W = WeylGroup(["A",3])
sage: covers = tuple([u, v] for v in W for u in v.bruhat_lower_covers() )
sage: P = Poset((W, covers), cover_relations = True)
sage: P.show()
Alternatively, one can just use::
sage: P = W.bruhat_poset()
The algorithm is taken from Stembridge's 'coxeter/weyl' package for Maple.
"""
desc = self.first_descent()
if desc is not None:
ww = self.apply_simple_reflection(desc)
return [u.apply_simple_reflection(desc) for u in ww.bruhat_lower_covers() if not u.has_descent(desc)] + [ww]
else:
return []
@cached_in_parent_method
def bruhat_upper_covers(self):
r"""
Returns all elements that cover ``self`` in (strong) Bruhat order.
The algorithm works recursively, using the 'inverse' of the method described for
lower covers :meth:`bruhat_lower_covers`. Namely, it runs through all `i` in the
index set. Let `w` equal ``self``. If `w` has no right descent `i`, then `w s_i` is a cover;
if `w` has a descent at `i`, then `u_j s_i` is a cover of `w` where `u_j` is a cover
of `w s_i`.
EXAMPLES::
sage: W = WeylGroup(['A',3,1], prefix="s")
sage: w = W.from_reduced_word([1,2,1])
sage: w.bruhat_upper_covers()
[s1*s2*s1*s0, s1*s2*s0*s1, s0*s1*s2*s1, s3*s1*s2*s1, s2*s3*s1*s2, s1*s2*s3*s1]
sage: W = WeylGroup(['A',3])
sage: w = W.long_element()
sage: w.bruhat_upper_covers()
[]
sage: W = WeylGroup(['A',3])
sage: w = W.from_reduced_word([1,2,1])
sage: S = [v for v in W if w in v.bruhat_lower_covers()]
sage: C = w.bruhat_upper_covers()
sage: set(S) == set(C)
True
"""
Covers = []
for i in self.parent().index_set():
if i in self.descents():
Covers += [ x.apply_simple_reflection(i) for x in self.apply_simple_reflection(i).bruhat_upper_covers()
if i not in x.descents() ]
else:
Covers += [ self.apply_simple_reflection(i) ]
return uniq(Covers)
@cached_in_parent_method
def bruhat_lower_covers_reflections(self):
r"""
Returns all 2-tuples of lower_covers and reflections (``v``, ``r``) where ``v`` is covered by ``self`` and ``r`` is the reflection such that ``self`` = ``v`` ``r``.
ALGORITHM:
See :meth:`.bruhat_lower_covers`
EXAMPLES::
sage: W = WeylGroup(['A',3], prefix="s")
sage: w = W.from_reduced_word([3,1,2,1])
sage: w.bruhat_lower_covers_reflections()
[(s1*s2*s1, s1*s2*s3*s2*s1), (s3*s2*s1, s2), (s3*s1*s2, s1)]
"""
i = self.first_descent()
if i is None:
return []
wi = self.apply_simple_reflection(i)
return [(u.apply_simple_reflection(i),r.apply_conjugation_by_simple_reflection(i)) for u,r in wi.bruhat_lower_covers_reflections() if not u.has_descent(i)] + [(wi, self.parent().simple_reflection(i))]
def lower_cover_reflections(self, side = 'right'):
r"""
Returns the reflections ``t`` such that ``self`` covers ``self`` ``t``.
If ``side`` is 'left', ``self`` covers ``t`` ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',3],prefix="s")
sage: w = W.from_reduced_word([3,1,2,1])
sage: w.lower_cover_reflections()
[s1*s2*s3*s2*s1, s2, s1]
sage: w.lower_cover_reflections(side = 'left')
[s2*s3*s2, s3, s1]
"""
if side == 'left':
self = self.inverse()
return [x[1] for x in self.bruhat_lower_covers_reflections()]
@cached_in_parent_method
def bruhat_upper_covers_reflections(self):
r"""
Returns all 2-tuples of covers and reflections (``v``, ``r``) where ``v`` covers ``self`` and ``r`` is the reflection such that ``self`` = ``v`` ``r``.
ALGORITHM:
See :meth:`.bruhat_upper_covers`
EXAMPLES::
sage: W = WeylGroup(['A',4], prefix="s")
sage: w = W.from_reduced_word([3,1,2,1])
sage: w.bruhat_upper_covers_reflections()
[(s1*s2*s3*s2*s1, s3), (s2*s3*s1*s2*s1, s2*s3*s2), (s3*s4*s1*s2*s1, s4), (s4*s3*s1*s2*s1, s1*s2*s3*s4*s3*s2*s1)]
"""
Covers = []
for i in self.parent().index_set():
wi = self.apply_simple_reflection(i)
if i in self.descents():
Covers += [(u.apply_simple_reflection(i), r.apply_conjugation_by_simple_reflection(i)) for u,r in wi.bruhat_upper_covers_reflections() if i not in u.descents()]
else:
Covers += [(wi,self.parent().simple_reflection(i))]
return uniq(Covers)
def cover_reflections(self, side = 'right'):
r"""
Returns the set of reflections ``t`` such that ``self`` ``t`` covers ``self``.
If ``side`` is 'left', ``t`` ``self`` covers ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',4], prefix="s")
sage: w = W.from_reduced_word([3,1,2,1])
sage: w.cover_reflections()
[s3, s2*s3*s2, s4, s1*s2*s3*s4*s3*s2*s1]
sage: w.cover_reflections(side = 'left')
[s4, s2, s1*s2*s1, s3*s4*s3]
"""
if side == 'left':
self = self.inverse()
return [x[1] for x in self.bruhat_upper_covers_reflections()]
@cached_in_parent_method
def bruhat_le(self, other):
"""
Bruhat comparison
INPUT:
- other - an element of the same Coxeter group
OUTPUT: a boolean
Returns whether ``self`` <= ``other`` in the Bruhat order.
EXAMPLES::
sage: W = WeylGroup(["A",3])
sage: u = W.from_reduced_word([1,2,1])
sage: v = W.from_reduced_word([1,2,3,2,1])
sage: u.bruhat_le(u)
True
sage: u.bruhat_le(v)
True
sage: v.bruhat_le(u)
False
sage: v.bruhat_le(v)
True
sage: s = W.simple_reflections()
sage: s[1].bruhat_le(W.one())
False
The implementation uses the equivalent condition that any
reduced word for ``other`` contains a reduced word for
``self`` as subword. See Stembridge, A short derivation of
the Möbius function for the Bruhat order. J. Algebraic
Combin. 25 (2007), no. 2, 141--148, Proposition 1.1.
Complexity: `O(l * c)`, where `l` is the minimum of the
lengths of `u` and of `v`, and `c` is the cost of the low
level methods :meth:`first_descent`, :meth:`has_descent`,
:meth:`~sage.categories.complex_reflection_or_generalized_coxeter_groups.ComplexReflectionOrGeneralizedCoxeterGroups.ElementMethods.apply_simple_reflection`),
etc. Those are typically `O(n)`, where `n` is the rank of the
Coxeter group.
TESTS:
We now run consistency tests with permutations and
:meth:`bruhat_lower_covers`::
sage: W = WeylGroup(["A",3])
sage: P4 = Permutations(4)
sage: def P4toW(w): return W.from_reduced_word(w.reduced_word())
sage: for u in P4:
... for v in P4:
... assert u.bruhat_lequal(v) == P4toW(u).bruhat_le(P4toW(v))
sage: W = WeylGroup(["B",3])
sage: P = W.bruhat_poset() # This is built from bruhat_lower_covers
sage: Q = Poset((W, attrcall("bruhat_le"))) # long time (10s)
sage: all( u.bruhat_le(v) == P.is_lequal(u,v) for u in W for v in W ) # long time (7s)
True
sage: all( P.is_lequal(u,v) == Q.is_lequal(u,v) for u in W for v in W) # long time (9s)
True
"""
if not have_same_parent(self, other):
raise TypeError("%s and %s do not have the same parent"%(self, other))
# could first compare the length, when that information is cheap
desc = other.first_descent()
if desc is not None:
return self.apply_simple_projection(desc, length_increasing = False).bruhat_le(other.apply_simple_reflection(desc))
else:
return self == other
def weak_le(self, other, side = 'right'):
"""
comparison in weak order
INPUT:
- other - an element of the same Coxeter group
- side - 'left' or 'right' (default: 'right')
OUTPUT: a boolean
Returns whether ``self`` <= ``other`` in left
(resp. right) weak order, that is if ``other`` can be obtained
from ``self`` by length increasing multiplication by simple
reflections on the left (resp. right).
EXAMPLES::
sage: W = WeylGroup(["A",3])
sage: u = W.from_reduced_word([1,2])
sage: v = W.from_reduced_word([1,2,3,2])
sage: u.weak_le(u)
True
sage: u.weak_le(v)
True
sage: v.weak_le(u)
False
sage: v.weak_le(v)
True
Comparison for left weak order is achieved with the option ``side``::
sage: u.weak_le(v, side = 'left')
False
The implementation uses the equivalent condition that any
reduced word for `u` is a right (resp. left) prefix of
some reduced word for `v`.
Complexity: `O(l * c)`, where `l` is the minimum of the
lengths of `u` and of `v`, and `c` is the cost of the low
level methods :meth:`first_descent`, :meth:`has_descent`,
:meth:`~sage.categories.complex_reflection_or_generalized_coxeter_groups.ComplexReflectionOrGeneralizedCoxeterGroups.ElementMethods.apply_simple_reflection`),
etc. Those are typically `O(n)`, where `n` is the rank of the
Coxeter group.
We now run consistency tests with permutations::
sage: W = WeylGroup(["A",3])
sage: P4 = Permutations(4)
sage: def P4toW(w): return W.from_reduced_word(w.reduced_word())
sage: for u in P4: # long time (5s on sage.math, 2011)
....: for v in P4:
....: assert u.permutohedron_lequal(v) == P4toW(u).weak_le(P4toW(v))
....: assert u.permutohedron_lequal(v, side='left') == P4toW(u).weak_le(P4toW(v), side='left')
"""
if not have_same_parent(self, other):
raise TypeError("%s and %s do not have the same parent"%(self,other))
# could first compare the length, when that information is cheap
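# For right weak order, some reduced word of self must be a prefix of a
# reduced word of other, so matching simple reflections are peeled off the
# left of both elements (and symmetrically for left weak order).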
prefix_side = 'left' if side == 'right' else 'right'
while True:
desc = self.first_descent(side = prefix_side)
if desc is None:
return True
if not other.has_descent(desc, side = prefix_side):
return False
self = self.apply_simple_reflection(desc, side = prefix_side)
other = other.apply_simple_reflection(desc, side = prefix_side)
def weak_covers(self, side = 'right', index_set = None, positive = False):
"""
Returns all elements that ``self`` covers in weak order.
INPUT:
- side - 'left' or 'right' (default: 'right')
- positive - a boolean (default: False)
- index_set - a list of indices or None
OUTPUT: a list
EXAMPLES::
sage: W = WeylGroup(['A',3])
sage: w = W.from_reduced_word([3,2,1])
sage: [x.reduced_word() for x in w.weak_covers()]
[[3, 2]]
To obtain instead elements that cover self, set ``positive = True``::
sage: [x.reduced_word() for x in w.weak_covers(positive = True)]
[[3, 1, 2, 1], [2, 3, 2, 1]]
To obtain covers for left weak order, set the option side to 'left'::
sage: [x.reduced_word() for x in w.weak_covers(side='left')]
[[2, 1]]
sage: w = W.from_reduced_word([3,2,3,1])
sage: [x.reduced_word() for x in w.weak_covers()]
[[2, 3, 2], [3, 2, 1]]
sage: [x.reduced_word() for x in w.weak_covers(side='left')]
[[3, 2, 1], [2, 3, 1]]
Covers w.r.t. a parabolic subgroup are obtained with the option ``index_set``::
sage: [x.reduced_word() for x in w.weak_covers(index_set = [1,2])]
[[2, 3, 2]]
"""
return [ self.apply_simple_reflection(i, side=side)
for i in self.descents(side=side, index_set = index_set, positive = positive) ]
def coxeter_sorting_word(self,c):
r"""
Return the ``c``-sorting word of ``self``.
For a Coxeter element `c` and an element `w`, the `c`-sorting
word of `w` is the lexicographic minimal reduced expression of
`w` in the infinite word `c^\infty`.
INPUT:
- ``c``-- a Coxeter element.
OUTPUT:
the ``c``-sorting word of ``self`` as a list of integers.
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: c = W.from_reduced_word([0,2,1])
sage: w = W.from_reduced_word([1,2,1,0,1])
sage: w.coxeter_sorting_word(c)
[2, 1, 2, 0, 1]
"""
if hasattr(c,"reduced_word"):
c = c.reduced_word()
elif not isinstance(c,list):
c = list(c)
n = self.parent().rank()
pi = self
l = pi.length()
i = 0
sorting_word = []
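# Scan the letters of c cyclically; whenever the current letter is a left
# descent of the remaining element, apply it and record it. The recorded
# letters form the c-sorting word.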
while l > 0:
s = c[i]
if pi.has_left_descent(s):
pi = pi.apply_simple_reflection_left(s)
l -= 1
sorting_word.append(s)
i += 1
if i == n:
i = 0
return sorting_word
def is_coxeter_sortable(self,c,sorting_word=None):
r"""
Return whether ``self`` is ``c``-sortable.
Given a Coxeter element `c`, an element `w` is `c`-sortable if
its `c`-sorting word decomposes into a sequence of weakly
decreasing subwords of `c`.
INPUT:
- ``c`` -- a Coxeter element.
- ``sorting_word`` -- sorting word (default: ``None``); a
precomputed ``c``-sorting word may be passed to avoid recomputing it.
OUTPUT:
whether ``self`` is ``c``-sortable
EXAMPLES::
sage: W = CoxeterGroups().example()
sage: c = W.from_reduced_word([0,2,1])
sage: w = W.from_reduced_word([1,2,1,0,1])
sage: w.coxeter_sorting_word(c)
[2, 1, 2, 0, 1]
sage: w.is_coxeter_sortable(c)
False
sage: w = W.from_reduced_word([0,2,1,0,2])
sage: w.coxeter_sorting_word(c)
[2, 0, 1, 2, 0]
sage: w.is_coxeter_sortable(c)
True
sage: W = CoxeterGroup(['A',3])
sage: c = W.from_reduced_word([1,2,3])
sage: len([w for w in W if w.is_coxeter_sortable(c)]) # number of c-sortable elements in A_3 (Catalan number)
14
"""
if hasattr(c,"reduced_word"):
c = c.reduced_word()
elif not isinstance(c,list):
c = list(c)
if sorting_word is None:
sorting_word = self.coxeter_sorting_word(c)
n = len(c)
containment_list = [ True ]*n
l = 0
i = 0
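# containment_list[i] records whether the i-th letter of c may still occur
# in later passes; once a letter is skipped it is disallowed, which encodes
# the weakly decreasing subword condition.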
while l < len(sorting_word):
s = c[i]
t = sorting_word[l]
if s == t:
l += 1
if not containment_list[i]:
return False
else:
containment_list[i] = False
i += 1
if i == n:
i = 0
return True
def apply_demazure_product(self, element, side = 'right', length_increasing = True):
r"""
Returns the Demazure or 0-Hecke product of ``self`` with another Coxeter group element.
See :meth:`CoxeterGroups.ParentMethods.simple_projections`.
INPUT:
- ``element`` -- either an element of the same Coxeter
group as ``self`` or a tuple or a list (such as a
reduced word) of elements from the index set of the
Coxeter group.
- ``side`` -- 'left' or 'right' (default: 'right'); the
side of ``self`` on which the element should be
applied. If ``side`` is 'left' then the operation is
applied on the left.
- ``length_increasing`` -- a boolean (default True)
whether to act length increasingly or decreasingly
EXAMPLES::
sage: W = WeylGroup(['C',4],prefix="s")
sage: v = W.from_reduced_word([1,2,3,4,3,1])
sage: v.apply_demazure_product([1,3,4,3,3])
s4*s1*s2*s3*s4*s3*s1
sage: v.apply_demazure_product([1,3,4,3],side='left')
s3*s4*s1*s2*s3*s4*s2*s3*s1
sage: v.apply_demazure_product((1,3,4,3),side='left')
s3*s4*s1*s2*s3*s4*s2*s3*s1
sage: v.apply_demazure_product(v)
s2*s3*s4*s1*s2*s3*s4*s2*s3*s2*s1
"""
# if self and element have the same parent
if self.parent().is_parent_of(element):
the_word = element.reduced_word()
else:
# check for a list or tuple of elements of the index set
if isinstance(element, (tuple)):
element = [x for x in element]
if not isinstance(element, (list)):
raise TypeError("Bad Coxeter group element input: %s"%(element))
I = self.parent().index_set()
if not all(i in I for i in element):
raise ValueError("%s does not have all its members in the index set of the %s"%(element, self.parent()))
# the copy is so that if we need to reverse the list, the original will not
# get reversed
the_word = copy(element)
if side == 'left':
the_word.reverse()
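# Apply the 0-Hecke (Demazure) operators letter by letter: pi_i multiplies
# by s_i only when this changes the length in the requested direction.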
for i in the_word:
self = self.apply_simple_projection(i, side = side, length_increasing = length_increasing)
return self
def min_demazure_product_greater(self, element):
r"""
Finds the unique Bruhat-minimum element ``u`` such that ``v`` $\le$ ``w`` * ``u`` where ``v`` is ``self``, ``w`` is ``element`` and ``*`` is the Demazure product.
INPUT:
- ``element`` is either an element of the same Coxeter group as ``self`` or a list (such as a reduced word) of elements from the index set of the Coxeter group.
EXAMPLES::
sage: W = WeylGroup(['A',4],prefix="s")
sage: v = W.from_reduced_word([2,3,4,1,2])
sage: u = W.from_reduced_word([2,3,2,1])
sage: v.min_demazure_product_greater(u)
s4*s2
sage: v.min_demazure_product_greater([2,3,2,1])
s4*s2
sage: v.min_demazure_product_greater((2,3,2,1))
s4*s2
"""
# if self and element have the same parent
if self.parent().is_parent_of(element):
the_word = element.reduced_word()
# else require that ``element`` is a list or tuple of index_set elements
else:
if not isinstance(element, (tuple,list)):
raise TypeError("Bad Coxeter group element input: %s"%(element))
I = self.parent().index_set()
if not all(i in I for i in element):
raise ValueError("%s does not have all its members in the index set of the %s"%(element, self.parent()))
the_word = element
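# Strip from the left of self every letter of the word that is a left
# descent; the element that remains is the Bruhat-minimum complement u.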
for i in the_word:
if self.has_descent(i, side = 'left'):
self = self.apply_simple_reflection(i, side = 'left')
return self
def deodhar_factor_element(self, w, index_set):
r"""
Returns Deodhar's Bruhat order factoring element.
INPUT:
- ``w`` is an element of the same Coxeter group ``W`` as ``self``
- ``index_set`` is a subset of Dynkin nodes defining a parabolic subgroup ``W'`` of ``W``
It is assumed that ``v = self`` and ``w`` are minimum length coset representatives
for ``W/W'`` such that ``v`` $\le$ ``w`` in Bruhat order.
OUTPUT:
Deodhar's element ``f(v,w)`` is the unique element of ``W'`` such that,
for all ``v'`` and ``w'`` in ``W'``, ``vv'`` $\le$ ``ww'`` in ``W`` if and only if
``v'`` $\le$ ``f(v,w) * w'`` in ``W'`` where ``*`` is the Demazure product.
EXAMPLES::
sage: W = WeylGroup(['A',5],prefix="s")
sage: v = W.from_reduced_word([5])
sage: w = W.from_reduced_word([4,5,2,3,1,2])
sage: v.deodhar_factor_element(w,[1,3,4])
s3*s1
sage: W=WeylGroup(['C',2])
sage: w=W.from_reduced_word([2,1])
sage: w.deodhar_factor_element(W.from_reduced_word([2]),[1])
Traceback (most recent call last):
...
ValueError: [2, 1] is not of minimum length in its coset for the parabolic subgroup with index set [1]
REFERENCES:
.. [Deodhar] \V. Deodhar, A splitting criterion for the Bruhat orderings on Coxeter groups. Comm. Algebra, 15:1889-1894, 1987.
"""
if self != self.coset_representative(index_set):
raise ValueError("%s is not of minimum length in its coset for the parabolic subgroup with index set %s"%(self.reduced_word(),index_set))
if w != w.coset_representative(index_set):
raise ValueError("%s is not of minimum length in its coset for the parabolic subgroup with index set %s"%(w.reduced_word(),index_set))
if not self.bruhat_le(w):
raise ValueError("Must have %s <= %s"%(self.reduced_word(), w.reduced_word()))
if w.is_one():
return w
i = w.first_descent(side = 'left')
sw = w.apply_simple_reflection(i, side = 'left')
sv = self.apply_simple_reflection(i, side = 'left')
if self.has_descent(i, side = 'left'):
return sv.deodhar_factor_element(sw, index_set)
dsp = self.deodhar_factor_element(sw, index_set)
des = sv.first_descent(side = 'right', index_set = index_set)
if des is None:
return dsp
return dsp.apply_simple_projection(des, side = 'left')
def deodhar_lift_up(self, w, index_set):
"""
Letting ``v = self``, given a Bruhat relation ``v W'`` $\le$ ``w W'`` among cosets
with respect to the subgroup ``W'`` given by the Dynkin node subset ``index_set``,
returns the Bruhat-minimum lift ``x`` of ``wW'`` such that ``v`` $\le$ ``x``.
INPUT:
- ``w`` is an element of the same Coxeter group ``W`` as ``self``.
- ``index_set`` is a subset of Dynkin nodes defining a parabolic subgroup ``W'``.
OUTPUT:
The unique Bruhat-minimum element ``x`` in ``W`` such that ``x W' = w W'``
and ``v`` $\le$ ``x``.
.. SEEALSO:: :meth:`sage.categories.coxeter_groups.CoxeterGroups.ElementMethods.deodhar_lift_down`
EXAMPLES::
sage: W = WeylGroup(['A',3],prefix="s")
sage: v = W.from_reduced_word([1,2,3])
sage: w = W.from_reduced_word([1,3,2])
sage: v.deodhar_lift_up(w, [3])
s1*s2*s3*s2
"""
vmin = self.coset_representative(index_set)
wmin = w.coset_representative(index_set)
if not vmin.bruhat_le(wmin):
raise ValueError("Must have %s <= %s mod the parabolic subgroup with index set %s"%(self.reduced_word(), w.reduced_word(), index_set))
vJ = vmin.inverse() * self
dsp = vmin.deodhar_factor_element(wmin,index_set)
return wmin * vJ.min_demazure_product_greater(dsp)
def deodhar_lift_down(self, w, index_set):
r"""
Letting ``v = self``, given a Bruhat relation ``v W'`` $\ge$ ``w W'`` among cosets
with respect to the subgroup ``W'`` given by the Dynkin node subset ``index_set``,
returns the Bruhat-maximum lift ``x`` of ``wW'`` such that ``v`` $\ge$ ``x``.
INPUT:
- ``w`` is an element of the same Coxeter group ``W`` as ``self``.
- ``index_set`` is a subset of Dynkin nodes defining a parabolic subgroup ``W'``.
OUTPUT:
The unique Bruhat-maximum element ``x`` in ``W`` such that ``x W' = w W'``
and ``v`` $\ge$ ``x``.
.. SEEALSO:: :meth:`sage.categories.coxeter_groups.CoxeterGroups.ElementMethods.deodhar_lift_up`
EXAMPLES::
sage: W = WeylGroup(['A',3],prefix="s")
sage: v = W.from_reduced_word([1,2,3,2])
sage: w = W.from_reduced_word([3,2])
sage: v.deodhar_lift_down(w, [3])
s2*s3*s2
"""
vmin = self.coset_representative(index_set)
wmin = w.coset_representative(index_set)
if not wmin.bruhat_le(vmin):
raise ValueError("Must have %s <= %s mod the parabolic subgroup with index set %s"%(w.reduced_word(), self.reduced_word(), index_set))
vJ = vmin.inverse() * self
dsp = wmin.deodhar_factor_element(vmin,index_set)
return wmin * dsp.apply_demazure_product(vJ)
@cached_in_parent_method
def inversions_as_reflections(self):
r"""
Returns the set of reflections ``r`` such that ``self`` ``r`` < ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',3], prefix="s")
sage: w = W.from_reduced_word([3,1,2,1])
sage: w.inversions_as_reflections()
[s1, s1*s2*s1, s2, s1*s2*s3*s2*s1]
"""
i = self.first_descent()
if i is None:
return []
wi = self.apply_simple_reflection(i)
return [self.parent().simple_reflection(i)]+[u.apply_conjugation_by_simple_reflection(i) for u in wi.inversions_as_reflections()]
def left_inversions_as_reflections(self):
r"""
Returns the set of reflections ``r`` such that ``r`` ``self`` < ``self``.
EXAMPLES::
sage: W = WeylGroup(['A',3], prefix="s")
sage: w = W.from_reduced_word([3,1,2,1])
sage: w.left_inversions_as_reflections()
[s1, s3, s1*s2*s3*s2*s1, s2*s3*s2]
"""
return self.inverse().inversions_as_reflections()
def lower_covers(self, side = 'right', index_set = None):
"""
Returns all elements that ``self`` covers in weak order.
INPUT:
- side - 'left' or 'right' (default: 'right')
- index_set - a list of indices or None
OUTPUT: a list
EXAMPLES::
sage: W = WeylGroup(['A',3])
sage: w = W.from_reduced_word([3,2,1])
sage: [x.reduced_word() for x in w.lower_covers()]
[[3, 2]]
To obtain covers for left weak order, set the option side to 'left'::
sage: [x.reduced_word() for x in w.lower_covers(side='left')]
[[2, 1]]
sage: w = W.from_reduced_word([3,2,3,1])
sage: [x.reduced_word() for x in w.lower_covers()]
[[2, 3, 2], [3, 2, 1]]
Covers w.r.t. a parabolic subgroup are obtained with the option ``index_set``::
sage: [x.reduced_word() for x in w.lower_covers(index_set = [1,2])]
[[2, 3, 2]]
sage: [x.reduced_word() for x in w.lower_covers(side='left')]
[[3, 2, 1], [2, 3, 1]]
"""
return self.weak_covers(side = side, index_set = index_set, positive = False)
def upper_covers(self, side = 'right', index_set = None):
"""
Returns all elements that cover ``self`` in weak order.
INPUT:
- side - 'left' or 'right' (default: 'right')
- index_set - a list of indices or None
OUTPUT: a list
EXAMPLES::
sage: W = WeylGroup(['A',3])
sage: w = W.from_reduced_word([2,3])
sage: [x.reduced_word() for x in w.upper_covers()]
[[2, 3, 1], [2, 3, 2]]
To obtain covers for left weak order, set the option ``side`` to 'left'::
sage: [x.reduced_word() for x in w.upper_covers(side = 'left')]
[[1, 2, 3], [2, 3, 2]]
Covers w.r.t. a parabolic subgroup are obtained with the option ``index_set``::
sage: [x.reduced_word() for x in w.upper_covers(index_set = [1])]
[[2, 3, 1]]
sage: [x.reduced_word() for x in w.upper_covers(side = 'left', index_set = [1])]
[[1, 2, 3]]
"""
return self.weak_covers(side = side, index_set = index_set, positive = True)
|
the-stack_0_26809
|
'''
Reciprocal cycles
Problem 26
A unit fraction contains 1 in the numerator. The decimal representation of the unit fractions with denominators 2 to 10 are given:
1/2 = 0.5
1/3 = 0.(3)
1/4 = 0.25
1/5 = 0.2
1/6 = 0.1(6)
1/7 = 0.(142857)
1/8 = 0.125
1/9 = 0.(1)
1/10 = 0.1
Where 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. It can be seen that 1/7 has a 6-digit recurring cycle.
Find the value of d < 1000 for which 1/d contains the longest recurring cycle in its decimal fraction part. '''
def rec_cyc(a):
#print("in rec_cyc , for : ", a)
dec = []
s = 10
cyc = 0
rems = [1]
divided = True
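# Long division of 1/a: keep track of the remainders seen so far; when a
# remainder repeats, the digits produced since its first occurrence form the
# recurring cycle, and its length is returned.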
while (True):
#print(s , dec)
if s < a:
dec.append(0)
cyc = cyc + 1
s = s * 10
else:
r = s % a
f = s // a
dec.append(f)
if r == 0:
cyc = 0
#print("Divided .. for " ,a , " dec =", dec," rem = ", rems)
break
elif r in rems:
rems.append(r)
first_pos = 0
for i in range(0, len(rems)):
if rems[i] == r:
first_pos = i + 1
break
cyc = len(rems) - first_pos
#print("Not divided ... for " ,a , " dec =", dec," rem = ", rems , ", first_pos = " ,first_pos, " len(rems)=" , len(rems))
break
else:
rems.append(r)
s = r * 10
return (cyc)
max_res = 0
max_int = 0
for i in range(2, 1000):
r = rec_cyc(i)
if r > max_res:
max_res = r
max_int = i
#print("Result = " , i,r)
print("max_res ", max_res, " max_int :", max_int)
|
the-stack_0_26812
|
import os
from setuptools import setup, find_packages
if os.path.exists('README.rst'):
with open('README.rst') as file:
long_description = file.read()
else:
long_description = ''
setup(
name="spinoff",
description="Framework for writing distributed, fault tolerant and scalable internet applications",
long_description=long_description,
version="0.7.18",
packages=find_packages(),
install_requires=[
'zope.interface',
'pyzmq==13.1',
'gevent==1.0',
],
author="Erik Allik",
author_email="[email protected]",
license="BSD",
url="http://github.com/eallik/spinoff/",
entry_points={
'console_scripts': [
'spin = spinoff.actor.spin:console',
]
},
)
|
the-stack_0_26813
|
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the PPR registration summary schema is valid."""
import pytest
from registry_schemas import validate
TEST_VALID_ALL = {
'registrationNumber': '9000100B',
'baseRegistrationNumber': '9000100B',
'statusType': 'ACT',
'registrationType': 'SA',
'registrationDescription': 'PPSA SECURITY AGREEMENT',
'registrationClass': 'PPSALIEN',
'expireDays': 1422,
'clientReferenceId': 'T-0000001',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00',
'lastUpdateDateTime': '2021-06-03T23:03:45+00:00',
'registeringParty': 'Bank of British Columbia',
'securedParties': 'Bank of British Columbia',
'registeringName': 'Michael Smith',
'inUserList': False
}
TEST_VALID_MINIMUM = {
'registrationNumber': '9000100B',
'registrationType': 'SA',
'registrationClass': 'PPSALIEN',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00'
}
TEST_INVALID_STATUS_TYPE = {
'registrationNumber': '9000100B',
'baseRegistrationNumber': '9000100B',
'statusType': 'XXX',
'registrationType': 'SA',
'registrationDescription': 'PPSA SECURITY AGREEMENT',
'registrationClass': 'PPSALIEN',
'expireDays': 1422,
'clientReferenceId': 'T-0000001',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00',
'lastUpdateDateTime': '2021-06-03T23:03:45+00:00',
'registeringParty': 'Bank of British Columbia',
'securedParties': 'Bank of British Columbia',
'registeringName': 'Michael Smith',
'inUserList': False
}
TEST_INVALID_REG_TYPE = {
'registrationNumber': '9000100B',
'baseRegistrationNumber': '9000100B',
'statusType': 'ACT',
'registrationType': 'SAX',
'registrationDescription': 'PPSA SECURITY AGREEMENT',
'registrationClass': 'PPSALIEN',
'expireDays': 1422,
'clientReferenceId': 'T-0000001',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00',
'lastUpdateDateTime': '2021-06-03T23:03:45+00:00',
'registeringParty': 'Bank of British Columbia',
'securedParties': 'Bank of British Columbia',
'registeringName': 'Michael Smith',
'inUserList': False
}
TEST_INVALID_REG_NUMBER = {
'registrationNumber': '9000100BXXX',
'baseRegistrationNumber': '9000100B',
'statusType': 'ACT',
'registrationType': 'SA',
'registrationDescription': 'PPSA SECURITY AGREEMENT',
'registrationClass': 'PPSALIEN',
'expireDays': 1422,
'clientReferenceId': 'T-0000001',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00',
'lastUpdateDateTime': '2021-06-03T23:03:45+00:00',
'registeringParty': 'Bank of British Columbia',
'securedParties': 'Bank of British Columbia',
'registeringName': 'Michael Smith',
'inUserList': False
}
TEST_INVALID_EXPIRE_DAYS = {
'registrationNumber': '9000100B',
'baseRegistrationNumber': '9000100B',
'statusType': 'ACT',
'registrationType': 'SA',
'registrationDescription': 'PPSA SECURITY AGREEMENT',
'registrationClass': 'PPSALIEN',
'expireDays': 'wrong',
'clientReferenceId': 'T-0000001',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00',
'lastUpdateDateTime': '2021-06-03T23:03:45+00:00',
'registeringParty': 'Bank of British Columbia',
'securedParties': 'Bank of British Columbia',
'registeringName': 'Michael Smith',
'inUserList': False
}
TEST_INVALID_MISSING_REG_NUM = {
'registrationType': 'SA',
'registrationClass': 'PPSALIEN',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00'
}
TEST_INVALID_MISSING_REG_TYPE = {
'registrationNumber': '9000100B',
'registrationClass': 'PPSALIEN',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00'
}
TEST_INVALID_MISSING_REG_CLASS = {
'registrationNumber': '9000100B',
'registrationType': 'SA',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00'
}
TEST_INVALID_MISSING_PATH = {
'registrationNumber': '9000100B',
'registrationType': 'SA',
'registrationClass': 'PPSALIEN',
'createDateTime': '2021-06-03T22:58:45+00:00'
}
TEST_INVALID_MISSING_CREATE_TS = {
'registrationNumber': '9000100B',
'registrationType': 'SA',
'registrationClass': 'PPSALIEN',
'path': '/ppr/api/v1/financing-statements/9000100B'
}
TEST_EMPTY_JSON = {
}
TEST_UNKNOWN_JSON = {
'registrationNumber': '9000100B',
'registrationType': 'SA',
'registrationClass': 'PPSALIEN',
'path': '/ppr/api/v1/financing-statements/9000100B',
'createDateTime': '2021-06-03T22:58:45+00:00',
'unknown': 'xxxx'
}
# testdata pattern is ({description}, {is valid}, {data})
TEST_DATA = [
('All valid', True, TEST_VALID_ALL),
('Minimum valid', True, TEST_VALID_MINIMUM),
('Invalid status type', False, TEST_INVALID_STATUS_TYPE),
('Invalid reg type length', False, TEST_INVALID_REG_TYPE),
('Invalid reg number length', False, TEST_INVALID_REG_NUMBER),
('Invalid expiry days value', False, TEST_INVALID_EXPIRE_DAYS),
('Invalid missing reg num', False, TEST_INVALID_MISSING_REG_NUM),
('Invalid missing reg type', False, TEST_INVALID_MISSING_REG_TYPE),
('Invalid missing reg class', False, TEST_INVALID_MISSING_REG_CLASS),
('Invalid missing path', False, TEST_INVALID_MISSING_PATH),
('Invalid missing create_ts', False, TEST_INVALID_MISSING_CREATE_TS),
('No settings', False, TEST_EMPTY_JSON),
('Unknown ignored setting', True, TEST_UNKNOWN_JSON)
]
@pytest.mark.parametrize('desc,valid,data', TEST_DATA)
def test_registration_summary(desc, valid, data):
"""Assert that the schema is performing as expected for a registration summary."""
is_valid, errors = validate(data, 'registrationSummary', 'ppr')
if errors:
# print(errors)
for err in errors:
print(err.message)
assert is_valid == valid
|
the-stack_0_26814
|
import matplotlib.pyplot as plt
from visualize.helpers.colors import color_rainbow
from visualize.helpers.data import load_pickles, save_file, get_values, filter_data
from analyze.scope_parse.c_get_lines import get_vol_cur_dir
import numpy as np
# data = load_pickle("G:/Prive/MIJN-Documenten/TU/62-Stage/20180103/run2-1us/data.pkl")
# data = load_pickles('G:/Prive/MIJN-Documenten/TU/62-Stage/20180111')
data = load_pickles('20180104-500hz')
data = filter_data(data, input_v__gt=600)
lw = 0.4 # linewidth
w = np.unique(get_values(data, 'input_l'))
colors = color_rainbow(len(w))
fig, ax = plt.subplots(2, 1)
tit = fig.suptitle('Waveforms (1kHz, 26$\mu$H, avg32)')
for i, iw in enumerate(w):
l = str(iw) + '$\mu$s'
d = [d for d in data if d['input_l'] == iw]
v = get_values(d, 'output_v_pulse')
y = get_values(d, 'o3_gramsec')
ei = get_values(d, 'input_p')
eo = get_values(d, 'output_e_rise') * get_values(d, 'input_f')
# l=str(line['input_voltage_output'] / 1000) + 'kV'
ax[0].plot(v, y, label=l, color=colors[i], linewidth=lw)
ax[1].plot(v, ei, label=l + ' Pin', color=colors[i], linewidth=lw)
ax[1].plot(v, eo, label=l + 'Pout', color=colors[i], linewidth=lw)
ax[0].set_ylabel('Concentration [gram/s]')
ax[1].set_ylabel('Power [W]')
plt.xlabel('Voltage [V]')
ax[0].grid(True)
ax[1].grid(True)
lgd = ax[0].legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.show()
save_file(fig, name='v_ep', bbox_extra_artists=(lgd, tit,))
|
the-stack_0_26815
|
import sys
import csv
import re
from operator import itemgetter
from phrase_sentiment import extract_sentiment_values, compute_sentiment
def extract_actors_tweets(csv_file_name):
with open(csv_file_name) as csv_file:
file_reader = csv.DictReader(csv_file)
actors_tweets = {}
for row in file_reader:
if actors_tweets.get(row['user_name']):
actors_tweets[row['user_name']].append(row['tweet'])
else:
actors_tweets[row['user_name']] = [row['tweet']]
return actors_tweets
def compute_sentiment_for_tweets(tweets, sentiment_values):
cumulative_sentiment = 0
for tweet in tweets:
_, sentiment_val = compute_sentiment(tweet, sentiment_values)
cumulative_sentiment += sentiment_val
avg_sentiment = cumulative_sentiment / len(tweets)
return avg_sentiment
def sort_results(actors_sentiment_scores):
results = sorted(list(actors_sentiment_scores.items()), key=itemgetter(1, 0), reverse=True)
return results
def main():
sent_file_name = sys.argv[1]
sentiment_data = extract_sentiment_values(sent_file_name)
csv_file_name = sys.argv[2]
actors_tweets = extract_actors_tweets(csv_file_name)
actors_sentiment_scores = {}
for actor in actors_tweets.keys():
actors_sentiment_scores[actor] = compute_sentiment_for_tweets(actors_tweets[actor], sentiment_data)
results = sort_results(actors_sentiment_scores)
for i in results:
print(i[0] + '\t' + str(i[1]))
if __name__ == '__main__':
main()
|
the-stack_0_26816
|
# Copyright 2018 NTRlab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
from __future__ import print_function
import os
import subprocess
from setuptools import setup, find_packages
data_files = []
if os.path.exists("/etc/default"):
data_files.append(
('/etc/default', ['packaging/systemd/sawtooth-pbft-engine']))
if os.path.exists("/lib/systemd/system"):
data_files.append(
('/lib/systemd/system',
['packaging/systemd/sawtooth-pbft-engine.service']))
setup(
name='bgx-pbft-engine',
version=subprocess.check_output(
['../../../bin/get_version']).decode('utf-8').strip(),
description='BGX PBFT Consensus Engine',
author='Hyperledger Sawtooth-bgx',
url='https://github.com/hyperledger/sawtooth-core',
packages=find_packages(),
install_requires=[
"cbor",
"colorlog",
"cryptography",
"sawtooth-sdk",
"sawtooth-signing",
"toml",
"cbor"
],
data_files=data_files,
entry_points={
'console_scripts': [
'pbft-engine = sawtooth_pbft_engine.main:main'
]
})
|
the-stack_0_26818
|
#!/usr/bin/env python
from tkinter import Label, Button, END
from tkinter.tix import Tk, Control, ComboBox
top = Tk()
top.tk.eval('package require Tix')
lb = Label(top, text = 'Animals (in pairs; min : pair, max : dozen)')
lb.pack()
ct = Control(top, label = 'Number:', integer = True, max = 12, min = 2, value = 2, step = 2)
ct.label.config(font = 'Helvetica -14 bold')
ct.pack()
cb = ComboBox(top, label = 'Type:', editable = True)
for animal in ('dog', 'cat', 'hamster', 'python'):
cb.insert(END, animal)
cb.pack()
qb = Button(top, text = 'Quit', command = top.quit, bg = 'red', fg = 'white')
qb.pack()
top.mainloop()
|
the-stack_0_26819
|
import komand
from .schema import ManagerInfoInput, ManagerInfoOutput
# Custom imports below
import json
import requests
class ManagerInfo(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='manager_info',
description='Returns basic information about the Manager',
input=ManagerInfoInput(),
output=ManagerInfoOutput())
def run(self, params={}):
api = '/manager/info'
url = '{url}{api}'.format(url=self.connection.url, api=api)
self.logger.info('Request: %s', url)
try:
resp = requests.get(url, auth=self.connection.creds)
self.logger.info('Raw Response: %s', resp.json())
info = resp.json()['data']
info['error'] = resp.json()['error']
except requests.exceptions.HTTPError:
self.logger.error('Requests: HTTPError: status code %s for %s' % (str(resp.status_code), url))
raise Exception('Requests: Connect: Failed response from server {}'.format(url))
self.logger.info('Normalized Response: %s', info)
return info
def test(self):
# {'error': 0, 'data': 'Welcome to Wazuh HIDS API'}
url = self.connection.url
try:
resp = requests.get(url, auth=self.connection.creds)
r = resp.json()
self.logger.info('Raw Response: %s', r)
except requests.exceptions.HTTPError:
self.logger.error('Requests: HTTPError: status code %s for %s' % (str(resp.status_code), url))
raise Exception('Requests: Connect: Failed response from server {}'.format(url))
if r['error'] == 0:
# Example must match spec to succeed due to required's
return {
"installation_date": "Sat Apr 22 14:04:15 UTC 2017",
"tz_offset": "+0000",
"max_agents": "8000",
"tz_name": "UTC",
"error": 0,
"path": "/var/ossec",
"type": "server",
"ruleset_version": "v2.0",
"openssl_support": "yes",
"version": "v2.0"
}
else:
self.logger.error(r)
raise Exception('Requests: Connect: Failed response from server {}'.format(url))
|
the-stack_0_26820
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import json
import time
import re
from xml.dom.minidom import Node, Document, parseString
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
USERNAME = demisto.params()['credentials']['identifier']
PASSWORD = demisto.params()['credentials']['password']
AUTH = ('super/' + USERNAME, PASSWORD)
VERIFY_SSL = not demisto.params().get('unsecure', False)
HOST = demisto.params()['host']
QUERY_URL = HOST + "/phoenix/rest/query/"
REST_ADDRESS = HOST + "/phoenix/rest/h5"
EXTENDED_KEYS = {} # type: dict
def load_extended_keys():
global EXTENDED_KEYS
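# Cache the attribute-id -> display-name mapping (in the last run for
# fetch-incidents, otherwise in the integration context) so the full
# attribute list is only fetched from the API once.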
if demisto.command() == 'fetch-incidents':
last_run = demisto.getLastRun()
EXTENDED_KEYS = last_run.get('extended_keys', {})
else:
integration_context = demisto.getIntegrationContext()
EXTENDED_KEYS = integration_context.get('extended_keys', {})
if not EXTENDED_KEYS:
session = login()
url = REST_ADDRESS + '/eventAttributeType/all'
response = session.get(url, verify=VERIFY_SSL, auth=AUTH)
EXTENDED_KEYS = dict((attr['attributeId'], attr['displayName']) for attr in response.json())
if demisto.command() != 'fetch-incidents':
demisto.setIntegrationContext({'extended_keys': EXTENDED_KEYS})
def parse_resource_type(resource_type):
type_to_url_path = {
'Reports': 'report',
'Rules': 'rule',
'Networks': 'resource/network',
'Watch Lists': 'rule/wl',
'Protocols': 'resource/port',
'Event Type': 'eventType',
'Malware IP': 'mal/ip',
'Malware Domains': 'mal/site',
'Malware Urls': 'mal/url',
'Malware Hash': 'mal/hash',
'Malware Processes': 'mal/proc',
'Country Groups': 'resource/geo',
'Default Password': 'mal/pwd',
'Anonymity Network': 'mal/proxy',
'User Agents': 'mal/agent',
'Remediations': 'remediation',
}
return type_to_url_path.get(resource_type, resource_type)
@logger
def validateSuccessfulResponse(resp, error_text):
if resp.status_code != 200:
return_error('Got response status {} when {}'.format(resp.status_code, error_text))
@logger
def login():
session = requests.session()
login_url = HOST + '/phoenix/login-html.jsf'
response = session.get(login_url, verify=VERIFY_SSL)
# get the VIEW_STATE from the xml returned in the UI login page.
p = re.compile('(value=".{1046}==")')
viewState = p.findall(response.text.encode('utf-8'))
VIEW_STATE = viewState[0][len('value="'):][:-1]
data = {
'loginHtml': 'loginHtml',
'loginHtml:username': USERNAME,
'loginHtml:password': PASSWORD,
'loginHtml:userDomain': 'Empty',
'loginHtml:loginBtn': 'Log In',
'loginHtml:domain': 'super',
'javax.faces.ViewState': VIEW_STATE
}
headers = {
'Upgrade-Insecure-Requests': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
'application/signed-exchange;v=b3;q=0.9',
'Accept-Language': 'en-US,en;q=0.9,pt-PT;q=0.8,pt;q=0.7'
}
response = session.post(login_url, headers=headers, data=data, verify=VERIFY_SSL) # type: ignore
return session
def clear_incident_command():
args = demisto.args()
incident_id = args['incident_id']
reason = args.get('close_reason', '')
raw_response = clear_incident(incident_id, reason)
return_outputs("Incident cleared successfully.", {}, raw_response)
@logger
def clear_incident(incident_id, reason):
session = login()
headers = {
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json'
}
response = session.put(
HOST + '/phoenix/rest/h5/incident/clear',
params={'ids': [incident_id], 'user': USERNAME},
headers=headers,
data=reason,
verify=VERIFY_SSL)
validateSuccessfulResponse(response, "triggering events report")
return response.text
@logger
def getEventsByIncident(incident_id, max_results, extended_data, max_wait_time):
session = login()
# response = session.get(HOST + '/phoenix/rest/h5/report/triggerEvent?rawMsg=' + incident_id)
# validateSuccessfulResponse(response, "triggering events report")
#
# try:
# jsonRes = response.json()
# queryData = jsonRes[0]['right']
# except (ValueError, KeyError):
# return_error("Got wrong response format when triggering events report. "
# "Expected a json array but got:\n" + response.text)
queryData = {
"isReportService": True,
"selectClause": "eventSeverityCat,incidentLastSeen,eventName,incidentRptDevName,incidentSrc,incidentTarget,"
"incidentDetail,incidentStatus,incidentReso,incidentId,eventType,incidentTicketStatus,"
"bizService,count,incidentClearedTime,incidentTicketUser,incidentNotiRecipients,"
"incidentClearedReason,incidentComments,eventSeverity,incidentFirstSeen,incidentRptIp,"
"incidentTicketId,customer,incidentNotiStatus,incidentClearedUser,incidentExtUser,"
"incidentExtClearedTime,incidentExtResoTime,incidentExtTicketId,incidentExtTicketState,"
"incidentExtTicketType,incidentViewStatus,rawEventMsg,phIncidentCategory,phSubIncidentCategory,"
"incidentRptDevStatus",
"eventFilters": [{"name": "Filter_OVERALL_STATUS",
"singleConstraint": "(phEventCategory = 1) AND incidentId = {}".format(incident_id)}],
"hints": "IgnoreTime",
}
return getEventsByQuery(session, queryData, max_results, extended_data, max_wait_time,
"FortiSIEM events for Incident " + incident_id, incident_id=incident_id)
@logger
def getEventsByQuery(session, queryData, max_results, extended_data, max_wait_time, tableTitle, incident_id=None):
headers = {
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json'
}
response = session.post(REST_ADDRESS + '/report/run', headers=headers, data=json.dumps(queryData),
verify=VERIFY_SSL)
validateSuccessfulResponse(response, "running report")
data = response.json()
data["report"] = queryData
data = json.dumps(data)
# poll until report progress reaches 100
response = session.post(REST_ADDRESS + '/report/reportProgress', headers=headers, data=data, verify=VERIFY_SSL)
# response contain the percentage of the report loading
while response.text != "100" and max_wait_time > 0:
response = session.post(REST_ADDRESS + '/report/reportProgress', headers=headers, data=data, verify=VERIFY_SSL)
max_wait_time = int(max_wait_time) - 1
time.sleep(1)
params = {
'start': 0,
'perPage': max_results,
'allData': extended_data,
}
response = session.post(REST_ADDRESS + '/report/resultByReport', params=params, headers=headers, data=data,
verify=VERIFY_SSL)
try:
res = response.json()
eventKeys = res["headerData"]["columnNames"]
except (ValueError, KeyError):
return_error("Got wrong response format when getting report results. "
"Expected a json object but got:\n" + response.text)
# reformat results
eventData = []
md = ""
for key in res["lightValueObjects"]:
cur = {
'Event ID': key.get("naturalId", ""),
'Incident ID': incident_id,
}
for i in range(0, len(eventKeys)):
if len(key["data"]) == 0 or key["data"][0] == "No report results found.":
md = "No report results found."
break
else:
cur[eventKeys[i]] = key["data"][i]
if md != "":
            # no results were found, no need to loop
break
cur["ExtendedData"] = {}
for extItem in key["extData"]:
if EXTENDED_KEYS.get(extItem["left"]) is not None:
cur[EXTENDED_KEYS.get(extItem["left"]).replace(' ', '')] = extItem["right"] # type: ignore
else:
cur["ExtendedData"][extItem["left"]] = extItem["right"]
eventData.append(cur)
md = tableToMarkdown(tableTitle, eventData, eventKeys) if md == "" else md
demisto.results({
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': res,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': {'FortiSIEM.Events(val["Event ID"] && val["Event ID"] == obj["Event ID"])': eventData}
})
@logger
def GetEventQuery():
in_xml = create_query_xml("all", interval='1')
url = QUERY_URL + "eventQuery"
headers = {'Content-Type': 'text/xml'}
resp = requests.request('POST', url, headers=headers, data=in_xml, verify=VERIFY_SSL, auth=AUTH)
validateSuccessfulResponse(resp, "fetching event query")
queryId = resp.text
if 'error code="255"' in queryId:
return_error("Got error code 255 while getting event query. Make sure the query has valid syntax")
return queryId
@logger
def GetIncidentsByOrg(queryId):
# The request will poll until the server completes the query.
url = QUERY_URL + "progress/" + queryId
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
while resp.text != '100':
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
outXML = []
if resp.text == '100':
url = QUERY_URL + 'events/' + queryId + '/0/1000'
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
content = resp.text
if content != '':
outXML.append(content)
# this code is taken directly from their documentation.
# get all results (last "page" has less than 1000 records)
p = re.compile(r'totalCount="\d+"')
mlist = p.findall(content)
if mlist and mlist[0] != '':
mm = mlist[0].replace('"', '')
m = mm.split("=")[-1]
                num = 0
                if int(m) > 1000:
                    # use integer division so range(num) below receives an int on Python 3 as well
                    num = int(m) // 1000
                    if int(m) % 1000 > 0:
                        num += 1
if num > 0:
for i in range(num):
url = QUERY_URL + 'events/' + queryId + '/' + str(i * 1000 + 1) + '/1000'
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
content = resp.text
if content != '':
outXML.append(content)
else:
sys.exit(0)
phCustId = "all"
param = dumpXML(outXML, phCustId)
return param
@logger
def create_query_xml(include_value, interval="", single_evt_value="phEventCategory=1", interval_type="Minute",
attr_list=None, limit="All"):
doc = Document()
reports = doc.createElement("Reports")
doc.appendChild(reports)
report = doc.createElement("Report")
report.setAttribute("id", "")
report.setAttribute("group", "report")
reports.appendChild(report)
name = doc.createElement("Name")
report.appendChild(name)
doc.createTextNode("All Incidents")
custScope = doc.createElement("CustomerScope")
custScope.setAttribute("groupByEachCustomer", "true")
report.appendChild(custScope)
include = doc.createElement("Include")
if include_value == "all":
include.setAttribute("all", "true")
custScope.appendChild(include)
else:
custScope.appendChild(include)
include_text = doc.createTextNode(include_value)
include.appendChild(include_text)
exclude = doc.createElement("Exclude")
custScope.appendChild(exclude)
description = doc.createElement("description")
report.appendChild(description)
select = doc.createElement("SelectClause")
select.setAttribute("numEntries", limit)
report.appendChild(select)
attrList = doc.createElement("AttrList")
if attr_list:
attr_text = doc.createTextNode(str(attr_list))
attrList.appendChild(attr_text)
select.appendChild(attrList)
reportInterval = doc.createElement("ReportInterval")
report.appendChild(reportInterval)
window = doc.createElement("Window")
window.setAttribute("unit", interval_type)
window.setAttribute("val", interval)
reportInterval.appendChild(window)
pattern = doc.createElement("PatternClause")
pattern.setAttribute("window", "3600")
report.appendChild(pattern)
subPattern = doc.createElement("SubPattern")
subPattern.setAttribute("displayName", "Events")
subPattern.setAttribute("name", "Events")
pattern.appendChild(subPattern)
single = doc.createElement("SingleEvtConstr")
subPattern.appendChild(single)
single_text = doc.createTextNode(single_evt_value)
single.appendChild(single_text)
_filter = doc.createElement("RelevantFilterAttr")
report.appendChild(_filter)
return doc.toxml()
@logger
def dumpXML(xmlList, phCustId):
param = []
for xml in xmlList:
doc = parseString(xml.encode('utf-8'))
for node in doc.getElementsByTagName("events"):
for node1 in node.getElementsByTagName("event"):
mapping = {}
for node2 in node1.getElementsByTagName("attributes"):
for node3 in node2.getElementsByTagName("attribute"):
item_name = node3.getAttribute("name")
for node4 in node3.childNodes:
if node4.nodeType == Node.TEXT_NODE:
mapping[item_name] = node4.data
if phCustId == "all" or mapping['phCustId'] == phCustId:
param.append(mapping)
return param
@logger
def buildQueryString(args):
res_list = []
for key in args:
if 'IpAddr' not in key:
res_list.append('{} = "{}"'.format(key, args[key]))
else:
res_list.append("{} = {}".format(key, args[key]))
return " AND ".join(res_list)
@logger
def getEventsByFilter(maxResults, extendedData, maxWaitTime, reportWindow, reportWindowUnit):
session = login()
args = demisto.args()
del args["maxResults"]
del args["extendedData"]
del args["maxWaitTime"]
del args["reportWindow"]
del args["reportWindowUnit"]
query_string = buildQueryString(args)
query_data = {
"isReportService": True,
"selectClause": "phRecvTime,reptDevIpAddr,eventType,eventName,rawEventMsg,destIpAddr",
"reportWindow": int(reportWindow),
"reportWindowUnit": reportWindowUnit,
"timeRangeRelative": True,
"eventFilters": [{
"groupBy": "",
"singleConstraint": query_string
}],
"custId": 1
}
return getEventsByQuery(
session,
query_data,
maxResults,
extendedData,
maxWaitTime,
"FortiSIEM Event Results")
def parse_cmdb_list(cmdb_device):
device_dict = {
'DiscoverMethod': cmdb_device.get('discoverMethod', 'N/A'),
'Approved': cmdb_device.get('approved', 'false'),
'CreationMethod': cmdb_device.get('creationMethod', 'N/A'),
'AccessIp': cmdb_device.get('accessIp', 'N/A'),
'Name': cmdb_device.get('name', 'N/A'),
'WinMachineGuid': cmdb_device.get('winMachineGuid', 'N/A'),
'Unmanaged': cmdb_device.get('unmanaged', 'false'),
'Version': cmdb_device.get('version', 'N/A'),
'UpdateMethod': cmdb_device.get('updateMethod', 'N/A'),
}
timestamp = cmdb_device.get('discoverTime', None)
if timestamp and timestamp.isdigit():
device_dict['DiscoverTime'] = timestamp_to_datestring(timestamp)
elif timestamp:
device_dict['DiscoverTime'] = timestamp
else:
device_dict['DiscoverTime'] = 'N/A'
device_type = cmdb_device.get('deviceType')
if device_type:
device_dict['DeviceType'] = "{} {}".format(device_type['model'], device_type['vendor'])
else:
device_dict['DeviceType'] = 'N/A'
return device_dict
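# Illustrative example (field values are hypothetical): a raw CMDB entry such as
#   {'accessIp': '10.0.0.7', 'name': 'fw-01', 'discoverTime': '1582000000000',
#    'deviceType': {'model': 'FortiGate', 'vendor': 'Fortinet'}}
# would be mapped to a dict containing 'AccessIp': '10.0.0.7', 'Name': 'fw-01',
# 'DeviceType': 'FortiGate Fortinet', a human-readable 'DiscoverTime', and 'N/A' for missing fields.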
def get_cmdb_devices_command():
args = demisto.args()
device_ip = args.get('device_ip')
limit = int(args.get('limit'))
raw_response = get_cmdb_devices(device_ip, limit)
list_of_devices = list(map(parse_cmdb_list, raw_response))
return_outputs(
tableToMarkdown("Devices", list_of_devices),
{'FortiSIEM.CmdbDevices': list_of_devices},
raw_response
)
@logger
def get_cmdb_devices(device_ip=None, limit=100):
cmdb_url = HOST + "/phoenix/rest/cmdbDeviceInfo/devices"
if device_ip:
cmdb_url += "?includeIps=" + device_ip
response = requests.get(cmdb_url, verify=VERIFY_SSL, auth=AUTH)
list_of_devices = json.loads(xml2json(response.text))
if 'response' in list_of_devices:
return_error(list_of_devices["response"]["error"]["description"])
elif 'devices' in list_of_devices:
list_of_devices = list_of_devices['devices']['device']
elif 'device' in list_of_devices:
list_of_devices = [list_of_devices['device']]
return list_of_devices[:limit]
@logger
def get_events_by_query(query, report_window="60", interval_type="Minute", limit="20", extended_data='false',
max_wait_time=60):
session = login()
query_data = {
"isReportService": True,
"selectClause": "phRecvTime,reptDevIpAddr,eventType,eventName,rawEventMsg,destIpAddr",
"reportWindow": int(report_window),
"reportWindowUnit": interval_type,
"timeRangeRelative": True,
"eventFilters": [{
"groupBy": "",
"singleConstraint": query
}],
"custId": 1
}
return getEventsByQuery(
session,
query_data,
limit,
extended_data,
max_wait_time,
"FortiSIEM Event Results")
def get_lists_command():
raw_resources = get_lists()
resources = []
for r in flatten_resources(raw_resources):
resources.append({
'DisplayName': r['displayName'],
'NatualID': r['naturalId'],
'ID': r['id'],
'ResourceType': r['groupType']['displayName'],
'Children': [c['displayName'] for c in r['children']],
})
return_outputs(
tableToMarkdown('Lists:', resources, removeNull=True),
{'FortiSIEM.ResourceList(val.ID && val.ID == obj.ID)': resources},
raw_response=raw_resources)
@logger
def get_lists():
session = login()
url = REST_ADDRESS + '/group/resource'
response = session.get(url, verify=VERIFY_SSL, auth=AUTH)
return response.json()
def flatten_resources(raw_resources):
for r in raw_resources:
yield r
        # note: this recursion can exceed Python's recursion limit on very deeply nested resource trees
for sub_resource in flatten_resources(r['children']):
yield sub_resource
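# A minimal iterative sketch of the same traversal (illustrative only, not used by the commands):
# it assumes each resource dict has a 'children' list, like the recursive generator above, and
# avoids hitting Python's recursion limit on very deeply nested resource trees.
def flatten_resources_iterative(raw_resources):
    stack = list(raw_resources)
    while stack:
        resource = stack.pop()
        yield resource
        # push children so they are visited too; traversal order differs from the recursive version
        stack.extend(resource.get('children', []))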
def add_item_to_resource_list_command():
args = demisto.args()
resource_type = parse_resource_type(args['resource_type'])
group_id = args['group_id']
object_info = args.get('object-info', [])
object_info = dict(object_property.strip().split('=', 1) for object_property in object_info.split(','))
raw_response = add_item_to_resource_list(resource_type, group_id, object_info)
outputs = {'FortiSIEM.Resource(val.id && val.id == obj.id)': createContext(raw_response, removeNull=True)}
return_outputs(tableToMarkdown('Resource was added:', raw_response, removeNull=True), outputs, raw_response)
@logger
def add_item_to_resource_list(resource_type, group_id, object_info):
session = login()
url = '{}/{}/save'.format(REST_ADDRESS, resource_type)
object_info['groupId'] = group_id
object_info['active'] = True
object_info['sysDefined'] = False
response = session.post(url, data=json.dumps(object_info), verify=VERIFY_SSL, auth=AUTH)
response = response.json()
if response.get('code', 0) == -1:
return_error(response['msg'])
return response
def remove_item_from_resource_list_command():
args = demisto.args()
resource_type = parse_resource_type(args['resource_type'])
deleted_ids = args.get('ids', '').split(',')
raw_response = remove_item_from_resource_list(resource_type, deleted_ids)
return_outputs(raw_response, {}, raw_response=raw_response)
@logger
def remove_item_from_resource_list(resource_type, deleted_ids):
session = login()
url = '{}/{}/del'.format(REST_ADDRESS, resource_type)
response = session.delete(url, params={'ids': json.dumps(deleted_ids)}, verify=VERIFY_SSL, auth=AUTH)
if response.text != '"OK"':
return_error(response.text)
return 'items with id {} were removed.'.format(deleted_ids)
def get_resource_list_command():
args = demisto.args()
resource_type = parse_resource_type(args['resource_type'])
group_id = args['group_id']
raw_response = get_resource_list(resource_type, group_id)
headers = raw_response.get('headerData', {}).get('keys', [])
ec = []
for element in raw_response.get('lightValueObjects', []):
e = dict(zip(headers, element.get('data', [])))
e['id'] = element.get('objectId')
ec.append(e)
outputs = {'FortiSIEM.Resource(val.id && val.id == obj.id)': createContext(ec, removeNull=True)}
return_outputs(tableToMarkdown('Resource list:', ec, headerTransform=pascalToSpace, removeNull=True),
outputs,
raw_response)
@logger
def get_resource_list(resource_type, group_id):
session = login()
url = '{}/{}/list'.format(REST_ADDRESS, resource_type)
params = {
'groupId': group_id,
'start': 0,
'size': 50,
}
response = session.get(url, params=params, verify=VERIFY_SSL, auth=AUTH)
response = response.json()
if response.get('code', 0) == -1:
return_error(response['msg'])
return response
def convert_keys_to_snake_case(d):
d = dict((k.replace("-", "_"), v) for k, v in d.items())
return d
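# Illustrative example (the argument names are hypothetical):
#   convert_keys_to_snake_case({'report-window': '60', 'interval-type': 'Minute'})
#   -> {'report_window': '60', 'interval_type': 'Minute'}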
def test():
    try:
        login()
    except Exception as e:
        if isinstance(e, requests.exceptions.SSLError):
            demisto.results("SSL certificate verification failed")
        else:
            demisto.results(str(e))
        return  # don't also report 'ok' when the login attempt failed
    demisto.results('ok')
def fetch_incidents():
query_id = GetEventQuery()
res = GetIncidentsByOrg(query_id)
known_ids = demisto.getLastRun().get('ids', None)
if known_ids is None or not known_ids:
known_ids = []
incidents = []
for inc in res:
if inc.get('incidentId') not in known_ids:
incidents.append({"name": inc.get('eventName', 'New FortiSIEM Event'), "rawJSON": json.dumps(inc)})
if len(known_ids) >= 1000:
known_ids.pop(0)
known_ids.append(inc.get('incidentId'))
demisto.setLastRun({
'ids': known_ids,
'extended_keys': EXTENDED_KEYS
})
demisto.incidents(incidents)
sys.exit(0)
def main():
try:
handle_proxy()
load_extended_keys()
if demisto.command() == 'test-module':
test()
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'fortisiem-get-events-by-incident':
args = demisto.args()
getEventsByIncident(args['incID'], args['maxResults'], args['extendedData'], args['maxWaitTime'])
elif demisto.command() == 'fortisiem-clear-incident':
clear_incident_command()
elif demisto.command() == 'fortisiem-get-events-by-filter':
args = demisto.args()
getEventsByFilter(args['maxResults'], args['extendedData'], args['maxWaitTime'], args['reportWindow'],
args['reportWindowUnit'])
elif demisto.command() == 'fortisiem-get-events-by-query':
args = convert_keys_to_snake_case(demisto.args())
get_events_by_query(**args)
elif demisto.command() == 'fortisiem-get-cmdb-devices':
get_cmdb_devices_command()
elif demisto.command() == 'fortisiem-get-lists':
get_lists_command()
elif demisto.command() == 'fortisiem-add-item-to-resource-list':
add_item_to_resource_list_command()
elif demisto.command() == 'fortisiem-remove-item-from-resource-list':
remove_item_from_resource_list_command()
elif demisto.command() == 'fortisiem-get-resource-list':
get_resource_list_command()
except Exception as e:
if demisto.command() == 'fetch-incidents':
LOG(str(e))
LOG.print_log()
raise
else:
return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
|
the-stack_0_26822
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.frame
from .x_synchronous_frame_loader import XSynchronousFrameLoader as XSynchronousFrameLoader_5a8d1058
class OfficeFrameLoader(XSynchronousFrameLoader_5a8d1058):
"""
Service Class
**since**
LibreOffice 4.2
See Also:
`API OfficeFrameLoader <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1frame_1_1OfficeFrameLoader.html>`_
"""
__ooo_ns__: str = 'com.sun.star.frame'
__ooo_full_ns__: str = 'com.sun.star.frame.OfficeFrameLoader'
__ooo_type_name__: str = 'service'
__all__ = ['OfficeFrameLoader']
|
the-stack_0_26824
|
#!/usr/bin/env python
#!/bin/env python
'''
This script simply executes a solver, and ensures the expected number of iterations are performed.
It does
1: a multigrid solve
An exception is thrown otherwise.
'''
import underworld as uw
from underworld import function as fn
res=32
mesh = uw.mesh.FeMesh_Cartesian("Q1/DQ0", (res,res), (0.,0.), (1.,1.))
velocityField = uw.mesh.MeshVariable(mesh,2)
velocityField.data[:] = (0.,0.)
pressureField = uw.mesh.MeshVariable(mesh.subMesh,1)
pressureField.data[:] = 0.
# freeslip
IWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"]
JWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"]
freeslip = uw.conditions.DirichletCondition(velocityField, (IWalls, JWalls))
# We are going to make use of one of the existing analytic solutions so that we may easily
# obtain functions for a viscosity profile and forcing terms.
# Exact solution solCx with defaults
sol = fn.analytic.SolCx(eta_A=1.0, eta_B=10000.0, x_c=0.478, n_z=3)
stokesSystem = uw.systems.Stokes(velocityField,pressureField,sol.fn_viscosity,sol.fn_bodyforce,conditions=[freeslip,])
#Run the BSSCR Solver
# can optionally set penalty this way
solver=uw.systems.Solver(stokesSystem)
solver.options.A11.ksp_converged_reason=''
#solver.options.mg.pc_mg_type="additive"
#solver.options.mg.pc_mg_type="full"
#solver.options.mg.pc_mg_type="kaskade"
#solver.options.mg.pc_mg_type="multiplicative"
solver.options.mg_accel.mg_accelerating_smoothing=0
solver.options.mg_accel.mg_accelerating_smoothing_view=1
#solver.options.main.penalty=1000.0
#solver.options.main.help=''
solver.options.main.penalty=10.0
solver.options.main.restore_K=True
solver.solve()
stats=solver.get_stats()
solver.print_stats()
from libUnderworld import petsc
petsc.OptionsPrint()
def check():
if stats.pressure_its > 5:
raise RuntimeError("Test appears to require too many pressure iterations. Iteration count = {}.".format(stats.pressure_its))
if stats.velocity_presolve_its > 10:
raise RuntimeError("Test appears to require too many velocity pre solve iterations. Iteration count = {}.".format(stats.velocity_presolve_its))
if -1 != stats.velocity_pressuresolve_its: # -1 will be returned if this stat isn't supported.
if stats.velocity_pressuresolve_its > 40 :
raise RuntimeError("Test appears to require too many velocity pressure solve iterations. Iteration count = {}.".format(stats.velocity_pressuresolve_its))
if stats.velocity_backsolve_its > 8:
raise RuntimeError("Test appears to require too many velocity back solve iterations. Iteration count = {}.".format(stats.velocity_backsolve_its))
check()
solver.set_inner_method("lu")
solver.solve()
solver.set_inner_method("mg")
solver.solve()
solver.print_stats()
petsc.OptionsPrint()
check()
|
the-stack_0_26825
|
# -*- coding:utf-8 -*-
"""
1. Import the module
2. Create a socket
3. Bind the port
4. Start listening, switching the socket from active to passive mode
5. Accept a client connection
6. Receive the file name sent by the client
7. Read the file contents based on the file name
8. Send the contents that were read to the client (in a loop)
9. Close the connection with the client
10. Close the socket
"""
import socket
Filesocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
Filesocket.bind(("18.18.23.108",8888))
Filesocket.listen()
while True:
new_client_socket,ip_port = Filesocket.accept()
print("欢迎新客户端:",ip_port)
recvdata = new_client_socket.recv(1024)
file_name = recvdata.decode()
print(file_name)
try:
with open(file_name,"rb") as fil:
while True:
filedata = fil.read(1024)
                # Check whether the end of the file has been reached.
if filedata:
new_client_socket.send(filedata)
else:
break
except Exception as e:
print("文件:{}下载失败".format(file_name))
break
else:
print("文件:{}下载成功.".format(file_name))
break
new_client_socket.close()
Filesocket.close()
|
the-stack_0_26826
|
from typing import Dict
from setuptools import find_packages, setup
def get_version() -> str:
version: Dict[str, str] = {}
with open("dagster_shell/version.py") as fp:
exec(fp.read(), version) # pylint: disable=W0122
return version["__version__"]
if __name__ == "__main__":
ver = get_version()
    # don't pin dev installs to avoid pip dep resolver issues
pin = "" if ver == "0+dev" else f"=={ver}"
setup(
name="dagster-shell",
version=ver,
author="Elementl",
author_email="[email protected]",
license="Apache-2.0",
description="Package for Dagster shell ops.",
url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-shell",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
packages=find_packages(exclude=["dagster_shell_tests*"]),
install_requires=[f"dagster{pin}"],
extras_require={"test": ["psutil"]},
zip_safe=False,
)
|
the-stack_0_26827
|
import pytest
import requests
import time
from threading import Thread
from bottle import default_app, WSGIRefServer
from tomviz.acquisition import server
class Server(Thread):
def __init__(self):
super(Server, self).__init__()
self.host = 'localhost'
self.port = 9999
self.base_url = 'http://%s:%d' % (self.host, self.port)
self.url = '%s/acquisition' % self.base_url
self._server = WSGIRefServer(host=self.host, port=self.port)
def run(self):
self.setup()
self._server.run(app=default_app())
def start(self):
super(Server, self).start()
# Wait for bottle to start
while True:
try:
requests.get(self.base_url)
break
except requests.ConnectionError:
time.sleep(0.1)
def setup(self, adapter=None):
server.setup_app(adapter)
def stop(self):
self._server.srv.shutdown()
# Force the socket to close so we can reuse the same port
self._server.srv.socket.close()
@pytest.fixture(scope="module")
def acquisition_server():
srv = Server()
srv.start()
yield srv
srv.stop()
srv.join()
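# A minimal sketch of a test that consumes the fixture above (the assertion is illustrative;
# real tests would exercise the tomviz acquisition endpoints under acquisition_server.url):
#
#   def test_server_is_reachable(acquisition_server):
#       response = requests.get(acquisition_server.base_url)
#       assert response.status_code < 500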
|
the-stack_0_26828
|
# -*- coding: utf-8 -*-
import csv
import os
import pathlib
import sys
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from scipy import misc
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from preprocess import align
class MaigoDataBase(object):
"""Data strage for people with face image.
Attributes:
people (list[dict]): List of people data.
People data has face image, name, ...
feature_list (list): List of feature vectors.
After build, this list will be empty
features (numpy.ndarray): Feature vectors of the storaged people.
"""
def __init__(self):
self.people = []
def add(self, person):
self.people.append(person)
def load(self, path, encoding='utf-8'):
with open(str(path), 'r', encoding=encoding) as f:
reader = csv.DictReader(f)
if reader.fieldnames[0] != 'maigo_name':
raise ValueError("Please Check Encodeing (=UTF-8 without BOM?): {}".format(str(path)))
for row in reader:
                if row['deleted'] != '1':  # keep only entries that are not marked as deleted
self.people.append(row)
return
class ImageStorage(object):
"""
"""
def __init__(self, model,):
self.model = model
self.image_paths = []
self.labels = []
self.labels_str = []
# self.images = []
self.features = []
self.size = 0
self.label2num = {}
def add(self, image_path, label,):
if label not in self.label2num:
self.label2num[label] = len(self.label2num)
images, extracted_filepaths = align([image_path], image_size=self.model.input_image_size, margin=44, gpu_memory_fraction=1.0)
if not extracted_filepaths:
return
self.image_paths.append(str(image_path))
self.labels_str.append(label)
self.labels.append(self.label2num[label])
# image = load_image(str(image_path), self.model.input_image_size, self.model.input_image_size, 'RGB')
# self.images.append(image)
feature = self.model(images[0])
self.features.append(feature)
self.size += 1
def save(self, path, only_feature=True,):
if only_feature:
np.save(path, self.features)
else:
            # Save the paths, labels and features together to a CSV file or similar
pass
def compare(self, idx_1, idx_2, plot=True,):
path_1 = str(self.image_paths[idx_1])
path_2 = str(self.image_paths[idx_2])
print('Path 1: {}'.format(path_1))
print('Path 2: {}'.format(path_2))
img_1 = load_image(path_1)
img_2 = load_image(path_2)
print('Shape 1: ', img_1.shape)
print('Shape 2: ', img_2.shape)
feature_1 = self.features[idx_1]
feature_2 = self.features[idx_2]
dist_euclid = euclidean_distances(feature_1, feature_2)[0, 0]
print('Euclidian Distance: {}'.format(dist_euclid))
cos_sim = cosine_similarity(feature_1, feature_2)[0, 0]
print('Cosine Similarity: {}'.format(cos_sim))
if plot:
fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10,4))
axL.imshow(img_1)
axL.set_title('img_1')
axR.imshow(img_2)
axR.set_title('img_2')
plt.show()
return
def most_similar(self, idx_1, idx_2, metrics='cosine'):
pass
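    # A possible sketch for most_similar (not part of the original file; the `idx`/`top_k`
    # parameters are assumptions): rank all stored feature vectors by cosine similarity to
    # the one at `idx` and return the indices of the closest matches.
    #
    #   def most_similar(self, idx, top_k=5, metrics='cosine'):
    #       sims = cosine_similarity(self.features[idx], np.vstack(self.features))[0]
    #       order = np.argsort(-sims)
    #       return [i for i in order if i != idx][:top_k]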
|
the-stack_0_26831
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__sets = {}
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.imagenet import imagenet
from datasets.vg import vg
from datasets.cityscape import cityscape
from datasets.KITTI import KITTI
from datasets.SIM import SIM
from datasets.pascal_voc6cls import pascal_voc6cls
import os
from model.utils.config import cfg
import numpy as np
for year in ['2007']:
for split in ['train_t', 'test_t']:
name = 'voc_clipart_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc6cls(split, year, os.path.join(cfg.DATA_DIR, 'clipart/')))
for year in ['2007']:
for split in ['train_cartoon', 'test_cartoon']:
name = 'voc_cartoon_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc6cls(split, year, os.path.join(cfg.DATA_DIR, 'cartoon/')))
for year in ['2007', '2012']:
for split in ['train_s', 'train_t', 'train_all', 'test_s', 'test_s500', 'test_t','test_all']:
name = 'cityscape_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: cityscape(split, year))
for year in ['2007']:
for split in ['trainall', 'train500']:
name = 'KITTI_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: KITTI(split, year, os.path.join(cfg.DATA_DIR, 'KITTI/')))
for year in ['2007']:
for split in ['train_s', 'test_s', 'train_t','test_t', 'test_s500']:
name = 'KITTI_cityscape_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: KITTI(split, year, os.path.join(cfg.DATA_DIR, 'cityscape/')))
for year in ['2012']:
for split in ['train_s']:
name = 'SIM_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: SIM(split, year, os.path.join(cfg.DATA_DIR, 'SIM/')))
for year in ['2007']:
for split in ['train_s', 'train_t', 'test_t', 'test_s500']:
name = 'SIM_cityscape_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: SIM(split, year, os.path.join(cfg.DATA_DIR, 'cityscape/')))
# Set up voc_<year>_<split>
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc6cls(split, year))
# Set up coco_2014_<split>
for year in ['2014']:
for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2014_cap_<split>
for year in ['2014']:
for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2015_<split>
for year in ['2015']:
for split in ['test', 'test-dev']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up vg_<split>
# for version in ['1600-400-20']:
# for split in ['minitrain', 'train', 'minival', 'val', 'test']:
# name = 'vg_{}_{}'.format(version,split)
# __sets[name] = (lambda split=split, version=version: vg(version, split))
for version in ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']:
for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val', 'test']:
name = 'vg_{}_{}'.format(version,split)
__sets[name] = (lambda split=split, version=version: vg(version, split))
# set up image net.
for split in ['train', 'val', 'val1', 'val2', 'test']:
name = 'imagenet_{}'.format(split)
devkit_path = 'data/imagenet/ILSVRC/devkit'
data_path = 'data/imagenet/ILSVRC'
__sets[name] = (lambda split=split, devkit_path=devkit_path, data_path=data_path: imagenet(split,devkit_path,data_path))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return list(__sets.keys())
|
the-stack_0_26832
|
import pytest
import operator, os
import common_code
def error_if(f, f_symbol, data_field, actual_values, expected_values,
model_name, errors, all_values, frequency_str):
d = actual_values[data_field]
if f_symbol == '<':
# Every time a value is smaller, update archive_value
archive_value = float('inf')
elif f_symbol == '>':
# Every time a value is greater, update archive_value
archive_value = float('-inf')
else:
raise Exception('Invalid Function Symbol %s' % f_symbol)
for model_id in sorted(d.keys()):
for epoch_id in sorted(d[model_id].keys()):
actual_value = d[model_id][epoch_id]
expected_value = expected_values[model_name + frequency_str][data_field]
if actual_value is None:
errors.append('actual_value: d[%s][%s] is None' % (model_id, epoch_id))
else:
print('actual_value={av}'.format(av=actual_value))
if expected_value is None:
errors.append(
                    'expected_value: d[%s][%s] is None' % (model_id, epoch_id))
else:
print('expected_value={ev}'.format(ev=expected_value))
if (actual_value is not None) and (expected_value is not None):
if f(actual_value, expected_value):
errors.append('%f %s %f %s Model %s Epoch %s %s' % (
actual_value, f_symbol, expected_value, model_name, model_id,
epoch_id, data_field))
all_values.append('%f %s Model %s Epoch %s %s' % (
actual_value, model_name, model_id, epoch_id, data_field))
if f(actual_value, archive_value):
archive_value = actual_value
else:
print('archiving: either actual_value or expected_value is None.')
return archive_value
def run_tests(actual_performance, model_name, dir_name, should_log,
compiler_name, cluster, frequency_str=''):
expected_performance = common_code.csv_to_dict(
'%s/bamboo/integration_tests/expected_values/%s/%s/expected_performance.csv' % (dir_name, cluster, compiler_name))
errors = []
all_values = []
greater_than = operator.gt
less_than = operator.lt
max_run_time = error_if(greater_than, '>', 'training_run_time', actual_performance, expected_performance, model_name, errors, all_values, frequency_str)
max_mean = error_if(greater_than, '>', 'training_mean', actual_performance, expected_performance, model_name, errors, all_values, frequency_str)
max_max = error_if(greater_than, '>', 'training_max', actual_performance, expected_performance, model_name, errors, all_values, frequency_str)
max_min = error_if(greater_than, '>', 'training_min', actual_performance, expected_performance, model_name, errors, all_values, frequency_str)
max_stdev = error_if(greater_than, '>', 'training_stdev', actual_performance, expected_performance, model_name, errors, all_values, frequency_str)
min_accuracy = error_if(less_than, '<', 'test_accuracy', actual_performance, expected_performance, model_name, errors, all_values, frequency_str)
archival_string = '%s, %f, %f, %f, %f, %f, %f\n' % (
os.environ['bamboo_buildNumber'], max_run_time, max_mean, max_max, max_min,
max_stdev, min_accuracy)
print('archival_string: ' + archival_string)
if os.environ['LOGNAME'] == 'lbannusr':
key = 'bamboo_planKey'
if key in os.environ:
plan = os.environ[key]
if plan in ['LBANN-NIGHTD', 'LBANN-WD']:
archive_file = '/usr/workspace/wsb/lbannusr/archives/%s/%s/%s/performance_%s.txt' % (plan, cluster, compiler_name, model_name)
print('Archive file: ' + archive_file)
with open(archive_file, 'a') as archive:
print('Archiving to file.')
archive.write(archival_string)
else:
print('The plan %s does not have archiving activated' % plan)
else:
print('%s is not in os.environ' % key)
else:
print('os.environ["LOGNAME"]=%s' % os.environ['LOGNAME'])
print('Errors for: %s %s (%d)' % (model_name, compiler_name, len(errors)))
for error in errors:
print(error)
if should_log:
print('All values for: %s %s (%d)' % (
model_name, compiler_name, len(all_values)))
for value in all_values:
print(value)
assert errors == []
DATA_FIELDS = [
'training_run_time',
'training_mean',
'training_max',
'training_min',
'training_stdev',
'test_accuracy'
]
def skeleton_performance_lenet_mnist(cluster, dir_name, executables,
compiler_name):
if compiler_name not in executables:
e = 'skeleton_performance_lenet_mnist: default_exes[%s] does not exist' % compiler_name
print('Skip - ' + e)
pytest.skip(e)
executable = executables[compiler_name]
model_name = 'lenet_mnist'
model_folder = 'models/' + model_name
should_log = True
actual_performance = common_code.skeleton(
cluster, dir_name, executable, model_folder, model_name, DATA_FIELDS,
should_log, compiler_name=compiler_name)
run_tests(actual_performance, model_name, dir_name, should_log,
compiler_name, cluster)
def skeleton_performance_alexnet(cluster, dir_name, executables, compiler_name,
weekly):
if compiler_name not in executables:
e = 'skeleton_performance_alexnet: default_exes[%s] does not exist' % compiler_name
print('Skip - ' + e)
pytest.skip(e)
executable = executables[compiler_name]
model_name = 'alexnet'
model_folder = 'models/' + model_name
should_log = True
actual_performance = common_code.skeleton(
cluster, dir_name, executable, model_folder, model_name, DATA_FIELDS,
should_log, compiler_name=compiler_name, weekly=weekly)
frequency_str = '_nightly'
if weekly:
frequency_str = '_weekly'
run_tests(actual_performance, model_name, dir_name, should_log,
compiler_name, cluster, frequency_str)
def skeleton_performance_full_alexnet(cluster, dir_name, executables,
compiler_name, weekly, run):
if not run:
e = 'skeleton_performance_full_alexnet: Ignored'
print('Skip - ' + e)
pytest.skip(e)
if not weekly:
e = 'skeleton_performance_full_alexnet: Non-local testing'
print('Skip - ' + e)
pytest.skip(e)
if compiler_name not in executables:
e = 'skeleton_performance_full_alexnet: default_exes[%s] does not exist' % compiler_name
print('Skip - ' + e)
pytest.skip(e)
executable = executables[compiler_name]
if not os.path.exists(executable):
pytest.skip('Executable does not exist: %s' % executable)
model_name = 'full_alexnet'
should_log = True
output_file_name = '%s/bamboo/integration_tests/output/%s_%s_output.txt' %(dir_name, model_name, compiler_name)
error_file_name = '%s/bamboo/integration_tests/error/%s_%s_error.txt' %(dir_name, model_name, compiler_name)
if cluster in ['catalyst']:
command = 'salloc --nodes 128 %s/bamboo/integration_tests/%s.sh > %s 2> %s' % (dir_name, model_name, output_file_name, error_file_name)
elif cluster in ['pascal', 'ray']:
e = 'skeleton_performance_full_alexnet: Pascal, Ray are unsupported for skeleton_performance_full_alexnet'
print('Skip - ' + e)
pytest.skip(e)
else:
raise Exception('Unsupported Cluster %s' % cluster)
common_code.run_lbann(command, model_name, output_file_name, error_file_name,
should_log) # Don't need return value
actual_performance = common_code.extract_data(output_file_name, DATA_FIELDS,
should_log)
run_tests(actual_performance, model_name, dir_name, should_log, compiler_name,
cluster)
def test_integration_performance_lenet_mnist_clang6(cluster, dirname, exes):
skeleton_performance_lenet_mnist(cluster, dirname, exes, 'clang6')
def test_integration_performance_alexnet_clang6(cluster, dirname, exes, weekly):
skeleton_performance_alexnet(cluster, dirname, exes, 'clang6', weekly)
def test_integration_performance_full_alexnet_clang6(cluster, dirname, exes,
weekly, run):
skeleton_performance_full_alexnet(cluster, dirname, exes, 'clang6', weekly,
run)
def test_integration_performance_lenet_mnist_gcc7(cluster, dirname, exes):
skeleton_performance_lenet_mnist(cluster, dirname, exes, 'gcc7')
def test_integration_performance_alexnet_gcc7(cluster, dirname, exes, weekly):
skeleton_performance_alexnet(cluster, dirname, exes, 'gcc7', weekly)
def test_integration_performance_full_alexnet_gcc7(cluster, dirname, exes,
weekly, run):
skeleton_performance_full_alexnet(cluster, dirname, exes, 'gcc7', weekly, run)
def test_integration_performance_lenet_mnist_intel19(cluster, dirname, exes):
skeleton_performance_lenet_mnist(cluster, dirname, exes, 'intel19')
def test_integration_performance_alexnet_intel19(cluster, dirname, exes,
weekly):
skeleton_performance_alexnet(cluster, dirname, exes, 'intel19', weekly)
def test_integration_performance_full_alexnet_intel19(cluster, dirname, exes,
weekly, run):
skeleton_performance_full_alexnet(cluster, dirname, exes, 'intel19', weekly,
run)
# Run with python -m pytest -s test_integration_performance.py -k 'test_integration_performance_lenet_mnist_exe' --exe=<executable>
def test_integration_performance_lenet_mnist_exe(cluster, dirname, exe):
if exe is None:
e = 'test_integration_performance_lenet_mnist_exe: Non-local testing'
print('Skip - ' + e)
pytest.skip(e)
exes = {'exe': exe}
skeleton_performance_lenet_mnist(cluster, dirname, exes, 'exe')
# Run with python -m pytest -s test_integration_performance.py -k 'test_integration_performance_alexnet_exe' --exe=<executable>
def test_integration_performance_alexnet_exe(cluster, dirname, exe):
if exe is None:
        e = 'test_integration_performance_alexnet_exe: Non-local testing'
print('Skip - ' + e)
pytest.skip(e)
exes = {'exe': exe}
skeleton_performance_alexnet(cluster, dirname, exes, 'exe', True)
# Run with python -m pytest -s test_integration_performance.py -k 'test_integration_performance_full_alexnet_exe' --exe=<executable>
def test_integration_performance_full_alexnet_exe(cluster, dirname, exe):
if exe is None:
e = 'test_integration_performance_full_alexnet_exe: Non-local testing'
print('Skip - ' + e)
pytest.skip(e)
exes = {'exe': exe}
skeleton_performance_full_alexnet(cluster, dirname, exes, 'exe', True)
|
the-stack_0_26835
|
from django.shortcuts import render, redirect
from .models import GroupUserConnection, GroupTaskConnection
from Group.models import Group
from Task.models import Task
from django.contrib.auth.models import User
from Users.is_user_in_group import is_user_in_group
from Users.models import Profile
from Group.destroy_group import destroy_group
def my_groups(request):
grps = GroupUserConnection.get_all_groups_of_user(request.user)
return render(request, 'Connections/mygroups.html', {'groups':grps})
def view_group(request, group_id):
curr_grp = Group.objects.filter(id=group_id).first()
if not curr_grp:
return redirect('home')
elif not is_user_in_group(request.user,curr_grp):
return redirect('home')
else:
grp_id = curr_grp.id
is_creator = curr_grp.is_user_creator(request.user)
name = curr_grp.name
prize = curr_grp.prize
tasks = GroupTaskConnection.get_all_tasks_of_group(curr_grp)
participents = GroupUserConnection.get_all_users_of_group(curr_grp)
return render(request, 'Connections/group.html', {
'grp_id':grp_id,
'is_creator':is_creator,
'name':name,
'prize':prize,
'tasks':tasks,
'par':participents
})
def assign(request, task_id):
curr_task = Task.objects.filter(id=task_id).first()
if not curr_task:
return redirect('home')
grp = GroupTaskConnection.objects.filter(task=curr_task).first().group
if not is_user_in_group(request.user,grp):
return redirect('home')
else:
curr_task.assignee = request.user
curr_task.save()
name = grp.name
prize = grp.prize
tasks = GroupTaskConnection.get_all_tasks_of_group(grp)
participents = GroupUserConnection.get_all_users_of_group(grp)
return render(request, 'Connections/group.html', {
'name':name,
'prize':prize,
'tasks':tasks,
'par':participents
})
def approve(request, task_id):
curr_task = Task.objects.filter(id=task_id).first()
if not curr_task:
return redirect('home')
grp = GroupTaskConnection.objects.filter(task=curr_task).first().group
    if grp.creator != request.user:
return redirect('home')
else:
curr_task.approve()
curr_task.save()
name = grp.name
prize = grp.prize
tasks = GroupTaskConnection.get_all_tasks_of_group(grp)
participents = GroupUserConnection.get_all_users_of_group(grp)
return render(request, 'Connections/group.html', {
'name':name,
'prize':prize,
'tasks':tasks,
'par':participents
})
def dsapprove(request, task_id):
curr_task = Task.objects.filter(id=task_id).first()
if not curr_task:
return redirect('home')
grp = GroupTaskConnection.objects.filter(task=curr_task).first().group
    if grp.creator != request.user:
return redirect('home')
else:
curr_task.dis_approve()
curr_task.save()
name = grp.name
prize = grp.prize
tasks = GroupTaskConnection.get_all_tasks_of_group(grp)
participents = GroupUserConnection.get_all_users_of_group(grp)
return render(request, 'Connections/group.html', {
'name':name,
'prize':prize,
'tasks':tasks,
'par':participents
})
def delete_group(request, group_id):
grp = Group.objects.filter(id=group_id).first()
if grp.is_user_creator(request.user):
destroy_group(grp)
return redirect('mygroups')
|
the-stack_0_26836
|
from django.contrib.auth import get_user_model
from django.db import models
class ObjectField(models.Model):
note = models.TextField(
blank=True,
null=True,
default='',
)
submitted = models.DateTimeField(
auto_now_add=True,
)
submitter = models.ForeignKey(
get_user_model(),
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='%(app_label)s_%(class)s_submitter_set',
related_query_name='%(app_label)s_%(class)ss_submitter',
)
changed = models.DateTimeField(
auto_now=True,
)
changer = models.ForeignKey(
get_user_model(),
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='%(app_label)s_%(class)s_changer_set',
related_query_name='%(app_label)s_%(class)ss_changer',
)
class Meta:
abstract = True
|
the-stack_0_26838
|
'''
Triplet Sum
You have been given a random integer array/list(ARR) and a number X. Find and return the number of triplets in the array/list which sum to X.
Note :
Given array/list can contain duplicate elements.
Input format :
The first line contains an Integer 't' which denotes the number of test cases or queries to be run. Then the test cases follow.
First line of each test case or query contains an integer 'N' representing the size of the first array/list.
Second line contains 'N' single space separated integers representing the elements in the array/list.
Third line contains an integer 'X'.
Output format :
For each test case, print the total number of triplets present in the array/list.
Output for every test case will be printed in a separate line.
Constraints :
1 <= t <= 50
0 <= N <= 10^2
0 <= X <= 10^9
Time Limit: 1 sec
Sample Input 1:
1
7
1 2 3 4 5 6 7
12
Sample Output 1:
5
Sample Input 2:
2
7
1 2 3 4 5 6 7
19
9
2 -5 8 -6 0 5 10 11 -3
10
Sample Output 2:
0
5
Explanation for Input 2:
Since there doesn't exist any triplet with sum equal to 19 for the first query, we print 0.
For the second query, we have 5 triplets in total that sum up to 10. They are, (2, 8, 0), (2, 11, -3), (-5, 5, 10), (8, 5, -3) and (-6, 5, 11)
'''
from sys import stdin
def findTriplet(arr, n, x) :
c = 0
for i in range(n):
for j in range(i+1, n):
for k in range(j+1, n):
if arr[i] + arr[j] + arr[k] == x:
c += 1
return c
#Taking Input Using Fast I/O
def takeInput() :
n = int(stdin.readline().strip())
if n == 0 :
return list(), 0
arr = list(map(int, stdin.readline().strip().split(" ")))
return arr, n
#main
t = int(stdin.readline().strip())
while t > 0 :
arr, n = takeInput()
x = int(stdin.readline().strip())
print(findTriplet(arr, n, x))
t -= 1
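# A sketch of an O(n^2) alternative (illustrative only, not wired into the main loop above):
# sort a copy of the array, then for each fixed first element scan the remainder with two
# pointers, counting duplicate runs so triplets are counted per index, matching findTriplet.
def findTripletFaster(arr, n, x):
    arr = sorted(arr)
    count = 0
    for i in range(n - 2):
        left, right = i + 1, n - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s < x:
                left += 1
            elif s > x:
                right -= 1
            elif arr[left] == arr[right]:
                # every element between the pointers is equal: choose any 2 of them
                k = right - left + 1
                count += k * (k - 1) // 2
                break
            else:
                # count the runs of equal values at both ends, then skip past them
                left_count = 1
                while arr[left] == arr[left + 1]:
                    left_count += 1
                    left += 1
                right_count = 1
                while arr[right] == arr[right - 1]:
                    right_count += 1
                    right -= 1
                count += left_count * right_count
                left += 1
                right -= 1
    return count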
|
the-stack_0_26839
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import pkg_resources
from esrally import paths
from esrally.utils import git, io
__version__ = pkg_resources.require("esrally")[0].version
__RALLY_VERSION_PATTERN = re.compile(r"^(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:.(.+))?$")
def version():
"""
:return: The release version string and an optional suffix for the current git revision if Rally is installed in development mode.
"""
release = __version__
# noinspection PyBroadException
try:
if git.is_working_copy(io.normalize_path("%s/.." % paths.rally_root())):
revision = git.head_revision(paths.rally_root())
return "%s (git revision: %s)" % (release, revision.strip())
except BaseException:
pass
# cannot determine head revision so user has probably installed Rally via pip instead of git clone
return release
def release_version():
"""
:return: The release version string split into its components: major, minor, patch and optional suffix.
"""
matches = __RALLY_VERSION_PATTERN.match(__version__)
if matches.start(4) > 0:
return int(matches.group(1)), int(matches.group(2)), int(matches.group(3)), matches.group(4)
elif matches.start(3) > 0:
return int(matches.group(1)), int(matches.group(2)), int(matches.group(3)), None
|
the-stack_0_26843
|
"""
Codemonk link: https://www.hackerearth.com/practice/algorithms/graphs/minimum-spanning-tree/practice-problems/algorithm/mr-president/
You have recently started playing a brand new computer game called "Mr. President". The game is about ruling a country,
building infrastructures and developing it. Your country consists of N cities and M bidirectional roads connecting them.
Each road has assigned a cost of its maintenance. The greatest achievement in the game is called "Great administrator"
and it is given to a player who manage to have all cities in the country connected by roads in such a way that it is
possible to travel between any two cities and that the sum of maintenance costs of these roads is not greater than K.
This is very hard to accomplish, but you are very close to do it. More precisely, you have just discovered a new method
of transforming standard roads into super roads, with cost of maintenance just 1, due to their extreme durability. The
bad news is that it is very expensive to transform a standard road into a super road, but you are so excited that you
are going to do it anyway. In addition, because you have a lot of other expenses, you also want to first demolish as
many roads as possible in order to safe some money on their maintenance first and then start working on getting the
achievement. You can demolish any road in the country and that operation does not cost you anything. Because you want to
spend the absolutely minimum money in order to get the achievement, you are interested in the smallest number of
transformations of standard roads into super roads in such a way that you can do that.
Input - Output:
In the first line there are 3 integers N, M and K denoting the number of cities in the country,
the number of roads in it and the desired sum of costs of maintenance.
M lines describing these roads follow. In each of them there are 3 integers A, B and C, where A and B denote
the endpoints of the road while C denotes the cost of its maintenance.
Output the minimum number of roads which need to be transformed in order to get the achievement.
If you cannot do it no matter what, output -1.
Sample input:
3 3 25
1 2 10
2 3 20
3 1 30
Sample Output:
1
"""
"""
The approach: build the MST of the graph (cities and the roads connecting them form a graph), then walk its edges
from the largest weight down to the smallest and count how many must be turned into super roads (cost 1) before the
total maintenance cost drops to K or below. The answer is -1 when the graph has no spanning tree at all, or when the
total still exceeds K even after converting every MST edge.
Final complexity: O(E*logE + E*inverse_ackermann(V)), dominated by sorting the edges.
"""
def balance(disjoint_set, i):
if disjoint_set[i] != i:
disjoint_set[i] = balance(disjoint_set, disjoint_set[i])
return disjoint_set[i]
def kruskal(graph):
disjoint_set = [i for i in range(n)]
sizes = [1] * n
priority_array = []
total = 0
count = 0
for i in range(m):
root_a = balance(disjoint_set, graph[i][1]-1)
root_b = balance(disjoint_set, graph[i][2]-1)
if root_a != root_b:
if sizes[root_a] <= sizes[root_b]:
sizes[root_b] += sizes[root_a]
disjoint_set[root_a] = root_b
else:
                sizes[root_a] += sizes[root_b]  # union by size: add the other tree's size, not its own
disjoint_set[root_b] = root_a
total += graph[i][0]
priority_array.append(graph[i][0])
for i in range(len(priority_array)-1, -1, -1):
if total > k:
count += 1
total = total - priority_array[i] + 1
if total > k:
return -1
current = balance(disjoint_set, 0)
for i in range(1, len(disjoint_set)):
temp = balance(disjoint_set, i)
if temp != current:
return -1
current = temp
return count
n, m, k = map(int, input().split())
sorted_graph = []
for _ in range(m):
a, b, c = map(int, input().split())
sorted_graph.append((c, a, b))
sorted_graph = sorted(sorted_graph, key=lambda x: x[0])
print(kruskal(sorted_graph))
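# Worked example with the sample input above: the edges sorted by weight are (10, 1-2), (20, 2-3), (30, 3-1).
# Kruskal picks 10 and 20, so the spanning tree costs 30 > K = 25. Replacing the heaviest tree edge (20)
# with a super road of cost 1 gives 10 + 1 = 11 <= 25, so one transformation suffices -> output 1.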
|
the-stack_0_26845
|
from django.conf.urls import url, include
from django.views.generic.edit import CreateView
from django.contrib.auth.forms import UserCreationForm
from django.views.generic import RedirectView
urlpatterns = [
url("^$", RedirectView.as_view(pattern_name="register")),
url(
"^register/",
CreateView.as_view(
template_name="register.html", form_class=UserCreationForm, success_url="/"
),
name="register",
),
url("^accounts/", include("django.contrib.auth.urls")),
# rest of your URLs as normal
]
|
the-stack_0_26847
|
from os import listdir
from os.path import isfile, join
import gzip
mypath = "../../data/tweets_clean/"
hashtags = open("../../data/top-hash-tags/top300hashtags","r")
wordsToRemove = open("../../data/top-words/unwanted-words","r")
saida = gzip.open("../../data/entradaLDA/userDocument.gz","w")
validTags = {tag.strip("#").strip() for tag in hashtags}
invalidWords = {word.strip() for word in wordsToRemove}
listValidTags = list(validTags)
hashtags.close()
wordsToRemove.close()
onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]
for f in onlyfiles:
tags = set()
words = set()
entradaTweets = gzip.open(mypath+f,"r")
for line in entradaTweets:
tags = tags.union({tag.strip("#").strip() for tag in line.split() if tag.startswith("#")})
words = words.union({tweet.strip() for tweet in line.split()})
tagsUsed = validTags & tags
tagUsedTranformed = {str(listValidTags.index(tag)) for tag in tagsUsed}
wordsUsed = words - invalidWords
if (tagUsedTranformed):
saida.write("["+' '.join(tagUsedTranformed) +"] " + ' '.join(wordsUsed)+"\n")
elif (wordsUsed):
saida.write(' '.join(wordsUsed).strip()+"\n")
saida.close()
|
the-stack_0_26848
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet resends transactions periodically."""
from collections import defaultdict
import time
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import ToHex
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, wait_until
class P2PStoreTxInvs(P2PInterface):
def __init__(self):
super().__init__()
self.tx_invs_received = defaultdict(int)
def on_inv(self, message):
# Store how many times invs have been received for each tx.
for i in message.inv:
if i.type == 1:
# save txid
self.tx_invs_received[i.hash] += 1
class ResendWalletTransactionsTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0] # alias
node.add_p2p_connection(P2PStoreTxInvs())
self.log.info("Create a new transaction and wait until it's broadcast")
txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)
# Wallet rebroadcast is first scheduled 1 sec after startup (see
# nNextResend in ResendWalletTransactions()). Sleep for just over a
# second to be certain that it has been called before the first
# setmocktime call below.
time.sleep(1.1)
# Can take a few seconds due to transaction trickling
wait_until(lambda: node.p2p.tx_invs_received[txid] >= 1, lock=mininode_lock)
# Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
node.add_p2p_connection(P2PStoreTxInvs())
self.log.info("Create a block")
# Create and submit a block without the transaction.
# Transactions are only rebroadcast if there has been a block at least five minutes
# after the last time we tried to broadcast. Use mocktime and give an extra minute to be sure.
block_time = int(time.time()) + 6 * 60
node.setmocktime(block_time)
block = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount() + 1), block_time)
block.rehash()
block.solve()
node.submitblock(ToHex(block))
# Transaction should not be rebroadcast
node.p2ps[1].sync_with_ping()
assert_equal(node.p2ps[1].tx_invs_received[txid], 0)
self.log.info("Transaction should be rebroadcast after 30 minutes")
# Use mocktime and give an extra 5 minutes to be sure.
rebroadcast_time = int(time.time()) + 41 * 60
node.setmocktime(rebroadcast_time)
wait_until(lambda: node.p2ps[1].tx_invs_received[txid] >= 1, lock=mininode_lock)
if __name__ == '__main__':
ResendWalletTransactionsTest().main()
|
the-stack_0_26849
|
from django import forms
class SearchForm(forms.Form):
q = forms.CharField(
widget=forms.TextInput(attrs={
'id': 'q',
'title': 'Enter a company name or place'
}),
required=True,
label='Search',
help_text="""e.g. <a href="/search/nestle">Nestle</a> or <a href="/search/windsor">Windsor</a>"""
)
|
the-stack_0_26850
|
from model import Generator
from model import Discriminator
from torch.autograd import Variable
from torchvision.utils import save_image
import torch
import torch.nn.functional as F
import numpy as np
import os
import time
import datetime
class Solver(object):
"""
Solver for training and testing StarGAN.
"""
def __init__(self, celeba_loader, rafd_loader, config):
"""Initialize configurations."""
# Data loader.
self.celeba_loader = celeba_loader
self.rafd_loader = rafd_loader
# Model configurations.
self.c_dim = config.c_dim
self.c2_dim = config.c2_dim
self.image_size = config.image_size
self.g_conv_dim = config.g_conv_dim
self.d_conv_dim = config.d_conv_dim
self.g_repeat_num = config.g_repeat_num
self.d_repeat_num = config.d_repeat_num
self.lambda_cls = config.lambda_cls
self.lambda_rec = config.lambda_rec
self.lambda_gp = config.lambda_gp
# Training configurations.
self.dataset = config.dataset
self.batch_size = config.batch_size
self.num_iters = config.num_iters
self.num_iters_decay = config.num_iters_decay
self.g_lr = config.g_lr
self.d_lr = config.d_lr
self.n_critic = config.n_critic
self.beta1 = config.beta1
self.beta2 = config.beta2
self.resume_iters = config.resume_iters
self.selected_attrs = config.selected_attrs
# Test configurations.
self.test_iters = config.test_iters
# Miscellaneous.
self.use_tensorboard = config.use_tensorboard
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Directories.
self.log_dir = config.log_dir
self.sample_dir = config.sample_dir
self.model_save_dir = config.model_save_dir
self.result_dir = config.result_dir
# Step size.
self.log_step = config.log_step
self.sample_step = config.sample_step
self.model_save_step = config.model_save_step
self.lr_update_step = config.lr_update_step
# Build the model and tensorboard.
self.build_model()
if self.use_tensorboard:
self.build_tensorboard()
def build_model(self):
"""Create a generator and a discriminator."""
if self.dataset in ['CelebA', 'RaFD']:
self.G = Generator(self.g_conv_dim, self.c_dim, self.g_repeat_num)
self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num)
elif self.dataset in ['Both']:
self.G = Generator(self.g_conv_dim, self.c_dim+self.c2_dim+2, self.g_repeat_num) # 2 for mask vector.
self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim+self.c2_dim, self.d_repeat_num)
self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])
self.print_network(self.G, 'G')
self.print_network(self.D, 'D')
self.G.to(self.device)
self.D.to(self.device)
def print_network(self, model, name):
"""Print out the network information."""
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(model)
print(name)
print("The number of parameters: {}".format(num_params))
def restore_model(self, resume_iters):
"""Restore the trained generator and discriminator."""
print('Loading the trained models from step {}...'.format(resume_iters))
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(resume_iters))
self.G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
self.D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))
def build_tensorboard(self):
"""Build a tensorboard logger."""
from logger import Logger
self.logger = Logger(self.log_dir)
def update_lr(self, g_lr, d_lr):
"""Decay learning rates of the generator and discriminator."""
for param_group in self.g_optimizer.param_groups:
param_group['lr'] = g_lr
for param_group in self.d_optimizer.param_groups:
param_group['lr'] = d_lr
def reset_grad(self):
"""Reset the gradient buffers."""
self.g_optimizer.zero_grad()
self.d_optimizer.zero_grad()
def denorm(self, x):
"""Convert the range from [-1, 1] to [0, 1]."""
out = (x + 1) / 2
return out.clamp_(0, 1)
def gradient_penalty(self, y, x):
"""Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
weight = torch.ones(y.size()).to(self.device)
dydx = torch.autograd.grad(outputs=y,
inputs=x,
grad_outputs=weight,
retain_graph=True,
create_graph=True,
only_inputs=True)[0]
dydx = dydx.view(dydx.size(0), -1)
dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
return torch.mean((dydx_l2norm-1)**2)
def label2onehot(self, labels, dim):
"""Convert label indices to one-hot vectors."""
batch_size = labels.size(0)
out = torch.zeros(batch_size, dim)
out[np.arange(batch_size), labels.long()] = 1
return out
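    # Worked example (comment only): label2onehot(torch.tensor([0, 2]), 3) returns
    #     tensor([[1., 0., 0.],
    #             [0., 0., 1.]])
    # i.e. each row carries a single 1 at the position of its class index; this is
    # how RaFD expression labels are turned into conditioning vectors below.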
def create_labels(self, c_org, c_dim=5, dataset='CelebA', selected_attrs=None):
"""Generate target domain labels for debugging and testing."""
# Get hair color indices.
if dataset == 'CelebA':
hair_color_indices = []
for i, attr_name in enumerate(selected_attrs):
if attr_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']:
hair_color_indices.append(i)
c_trg_list = []
for i in range(c_dim):
if dataset == 'CelebA':
c_trg = c_org.clone()
if i in hair_color_indices: # Set one hair color to 1 and the rest to 0.
c_trg[:, i] = 1
for j in hair_color_indices:
if j != i:
c_trg[:, j] = 0
else:
c_trg[:, i] = (c_trg[:, i] == 0) # Reverse attribute value.
elif dataset == 'RaFD':
c_trg = self.label2onehot(torch.ones(c_org.size(0))*i, c_dim)
c_trg_list.append(c_trg.to(self.device))
return c_trg_list
def classification_loss(self, logit, target, dataset='CelebA'):
"""Compute binary or softmax cross entropy loss."""
if dataset == 'CelebA':
            return F.binary_cross_entropy_with_logits(logit, target, reduction='sum') / logit.size(0)
elif dataset == 'RaFD':
return F.cross_entropy(logit, target)
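    # Illustrative note (comment only): CelebA targets are multi-hot attribute
    # vectors (e.g. a 5-dimensional 0/1 vector when five attributes are selected),
    # so each attribute gets an independent sigmoid/BCE term; the per-sample sum is
    # then averaged over the batch. RaFD targets are single class indices, hence
    # the softmax cross entropy branch.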
def train(self):
"""
Train StarGAN within a single dataset.
"""
# Set data loader.
if self.dataset == 'CelebA':
data_loader = self.celeba_loader
elif self.dataset == 'RaFD':
data_loader = self.rafd_loader
# Fetch fixed inputs for debugging.
data_iter = iter(data_loader)
x_fixed, c_org = next(data_iter)
x_fixed = x_fixed.to(self.device)
c_fixed_list = self.create_labels(c_org, self.c_dim, self.dataset, self.selected_attrs)
# Learning rate cache for decaying.
g_lr = self.g_lr
d_lr = self.d_lr
# Start training from scratch or resume training.
start_iters = 0
if self.resume_iters:
start_iters = self.resume_iters
self.restore_model(self.resume_iters)
# Start training.
print('Start training...')
start_time = time.time()
for i in range(start_iters, self.num_iters):
# =================================================================================== #
# 1. Preprocess input data #
# =================================================================================== #
# Fetch real images and labels.
try:
x_real, label_org = next(data_iter)
            except StopIteration:  # the data loader is exhausted, restart the iterator
data_iter = iter(data_loader)
x_real, label_org = next(data_iter)
# Generate target domain labels randomly.
rand_idx = torch.randperm(label_org.size(0))
label_trg = label_org[rand_idx]
if self.dataset == 'CelebA':
c_org = label_org.clone()
c_trg = label_trg.clone()
elif self.dataset == 'RaFD':
c_org = self.label2onehot(label_org, self.c_dim)
c_trg = self.label2onehot(label_trg, self.c_dim)
x_real = x_real.to(self.device) # Input images.
c_org = c_org.to(self.device) # Original domain labels.
c_trg = c_trg.to(self.device) # Target domain labels.
label_org = label_org.to(self.device) # Labels for computing classification loss.
label_trg = label_trg.to(self.device) # Labels for computing classification loss.
# =================================================================================== #
# 2. Train the discriminator #
# =================================================================================== #
# Compute loss with real images.
out_src, out_cls = self.D(x_real)
d_loss_real = - torch.mean(out_src)
d_loss_cls = self.classification_loss(out_cls, label_org, self.dataset)
# Compute loss with fake images.
x_fake = self.G(x_real, c_trg)
out_src, out_cls = self.D(x_fake.detach())
d_loss_fake = torch.mean(out_src)
# Compute loss for gradient penalty.
alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
x_hat = (alpha * x_real.data + (1 - alpha) * x_fake.data).requires_grad_(True)
out_src, _ = self.D(x_hat)
d_loss_gp = self.gradient_penalty(out_src, x_hat)
# Backward and optimize.
d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
# Logging.
loss = {}
loss['D/loss_real'] = d_loss_real.item()
loss['D/loss_fake'] = d_loss_fake.item()
loss['D/loss_cls'] = d_loss_cls.item()
loss['D/loss_gp'] = d_loss_gp.item()
# =================================================================================== #
# 3. Train the generator #
# =================================================================================== #
if (i+1) % self.n_critic == 0:
# Original-to-target domain.
x_fake = self.G(x_real, c_trg)
out_src, out_cls = self.D(x_fake)
g_loss_fake = - torch.mean(out_src)
g_loss_cls = self.classification_loss(out_cls, label_trg, self.dataset)
# Target-to-original domain.
x_reconst = self.G(x_fake, c_org)
g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))
# Backward and optimize.
g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging.
loss['G/loss_fake'] = g_loss_fake.item()
loss['G/loss_rec'] = g_loss_rec.item()
loss['G/loss_cls'] = g_loss_cls.item()
# =================================================================================== #
# 4. Miscellaneous #
# =================================================================================== #
# Print out training information.
if (i+1) % self.log_step == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters)
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log)
if self.use_tensorboard:
for tag, value in loss.items():
self.logger.scalar_summary(tag, value, i+1)
# Translate fixed images for debugging.
if (i+1) % self.sample_step == 0:
with torch.no_grad():
x_fake_list = [x_fixed]
for c_fixed in c_fixed_list:
x_fake_list.append(self.G(x_fixed, c_fixed))
x_concat = torch.cat(x_fake_list, dim=3)
sample_path = os.path.join(self.sample_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), sample_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(sample_path))
# Save model checkpoints.
if (i+1) % self.model_save_step == 0:
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(i+1))
torch.save(self.G.state_dict(), G_path)
torch.save(self.D.state_dict(), D_path)
print('Saved model checkpoints into {}...'.format(self.model_save_dir))
# Decay learning rates.
if (i+1) % self.lr_update_step == 0 and (i+1) > (self.num_iters - self.num_iters_decay):
g_lr -= (self.g_lr / float(self.num_iters_decay))
d_lr -= (self.d_lr / float(self.num_iters_decay))
self.update_lr(g_lr, d_lr)
print ('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
def train_multi(self):
"""Train StarGAN with multiple datasets."""
# Data iterators.
celeba_iter = iter(self.celeba_loader)
rafd_iter = iter(self.rafd_loader)
# Fetch fixed inputs for debugging.
x_fixed, c_org = next(celeba_iter)
x_fixed = x_fixed.to(self.device)
c_celeba_list = self.create_labels(c_org, self.c_dim, 'CelebA', self.selected_attrs)
c_rafd_list = self.create_labels(c_org, self.c2_dim, 'RaFD')
zero_celeba = torch.zeros(x_fixed.size(0), self.c_dim).to(self.device) # Zero vector for CelebA.
zero_rafd = torch.zeros(x_fixed.size(0), self.c2_dim).to(self.device) # Zero vector for RaFD.
mask_celeba = self.label2onehot(torch.zeros(x_fixed.size(0)), 2).to(self.device) # Mask vector: [1, 0].
mask_rafd = self.label2onehot(torch.ones(x_fixed.size(0)), 2).to(self.device) # Mask vector: [0, 1].
# Learning rate cache for decaying.
g_lr = self.g_lr
d_lr = self.d_lr
# Start training from scratch or resume training.
start_iters = 0
if self.resume_iters:
start_iters = self.resume_iters
self.restore_model(self.resume_iters)
# Start training.
print('Start training...')
start_time = time.time()
for i in range(start_iters, self.num_iters):
for dataset in ['CelebA', 'RaFD']:
# =================================================================================== #
# 1. Preprocess input data #
# =================================================================================== #
# Fetch real images and labels.
data_iter = celeba_iter if dataset == 'CelebA' else rafd_iter
try:
x_real, label_org = next(data_iter)
                except StopIteration:  # the iterator for this dataset is exhausted, restart it
if dataset == 'CelebA':
celeba_iter = iter(self.celeba_loader)
x_real, label_org = next(celeba_iter)
elif dataset == 'RaFD':
rafd_iter = iter(self.rafd_loader)
x_real, label_org = next(rafd_iter)
# Generate target domain labels randomly.
rand_idx = torch.randperm(label_org.size(0))
label_trg = label_org[rand_idx]
if dataset == 'CelebA':
c_org = label_org.clone()
c_trg = label_trg.clone()
zero = torch.zeros(x_real.size(0), self.c2_dim)
mask = self.label2onehot(torch.zeros(x_real.size(0)), 2)
c_org = torch.cat([c_org, zero, mask], dim=1)
c_trg = torch.cat([c_trg, zero, mask], dim=1)
elif dataset == 'RaFD':
c_org = self.label2onehot(label_org, self.c2_dim)
c_trg = self.label2onehot(label_trg, self.c2_dim)
zero = torch.zeros(x_real.size(0), self.c_dim)
mask = self.label2onehot(torch.ones(x_real.size(0)), 2)
c_org = torch.cat([zero, c_org, mask], dim=1)
c_trg = torch.cat([zero, c_trg, mask], dim=1)
x_real = x_real.to(self.device) # Input images.
c_org = c_org.to(self.device) # Original domain labels.
c_trg = c_trg.to(self.device) # Target domain labels.
label_org = label_org.to(self.device) # Labels for computing classification loss.
label_trg = label_trg.to(self.device) # Labels for computing classification loss.
# =================================================================================== #
# 2. Train the discriminator #
# =================================================================================== #
# Compute loss with real images.
out_src, out_cls = self.D(x_real)
out_cls = out_cls[:, :self.c_dim] if dataset == 'CelebA' else out_cls[:, self.c_dim:]
d_loss_real = - torch.mean(out_src)
d_loss_cls = self.classification_loss(out_cls, label_org, dataset)
# Compute loss with fake images.
x_fake = self.G(x_real, c_trg)
out_src, _ = self.D(x_fake.detach())
d_loss_fake = torch.mean(out_src)
# Compute loss for gradient penalty.
alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
x_hat = (alpha * x_real.data + (1 - alpha) * x_fake.data).requires_grad_(True)
out_src, _ = self.D(x_hat)
d_loss_gp = self.gradient_penalty(out_src, x_hat)
# Backward and optimize.
d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
# Logging.
loss = {}
loss['D/loss_real'] = d_loss_real.item()
loss['D/loss_fake'] = d_loss_fake.item()
loss['D/loss_cls'] = d_loss_cls.item()
loss['D/loss_gp'] = d_loss_gp.item()
# =================================================================================== #
# 3. Train the generator #
# =================================================================================== #
if (i+1) % self.n_critic == 0:
# Original-to-target domain.
x_fake = self.G(x_real, c_trg)
out_src, out_cls = self.D(x_fake)
out_cls = out_cls[:, :self.c_dim] if dataset == 'CelebA' else out_cls[:, self.c_dim:]
g_loss_fake = - torch.mean(out_src)
g_loss_cls = self.classification_loss(out_cls, label_trg, dataset)
# Target-to-original domain.
x_reconst = self.G(x_fake, c_org)
g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))
# Backward and optimize.
g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging.
loss['G/loss_fake'] = g_loss_fake.item()
loss['G/loss_rec'] = g_loss_rec.item()
loss['G/loss_cls'] = g_loss_cls.item()
# =================================================================================== #
# 4. Miscellaneous #
# =================================================================================== #
# Print out training info.
if (i+1) % self.log_step == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}], Dataset [{}]".format(et, i+1, self.num_iters, dataset)
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log)
if self.use_tensorboard:
for tag, value in loss.items():
self.logger.scalar_summary(tag, value, i+1)
# Translate fixed images for debugging.
if (i+1) % self.sample_step == 0:
with torch.no_grad():
x_fake_list = [x_fixed]
for c_fixed in c_celeba_list:
c_trg = torch.cat([c_fixed, zero_rafd, mask_celeba], dim=1)
x_fake_list.append(self.G(x_fixed, c_trg))
for c_fixed in c_rafd_list:
c_trg = torch.cat([zero_celeba, c_fixed, mask_rafd], dim=1)
x_fake_list.append(self.G(x_fixed, c_trg))
x_concat = torch.cat(x_fake_list, dim=3)
sample_path = os.path.join(self.sample_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), sample_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(sample_path))
# Save model checkpoints.
if (i+1) % self.model_save_step == 0:
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(i+1))
torch.save(self.G.state_dict(), G_path)
torch.save(self.D.state_dict(), D_path)
print('Saved model checkpoints into {}...'.format(self.model_save_dir))
# Decay learning rates.
if (i+1) % self.lr_update_step == 0 and (i+1) > (self.num_iters - self.num_iters_decay):
g_lr -= (self.g_lr / float(self.num_iters_decay))
d_lr -= (self.d_lr / float(self.num_iters_decay))
self.update_lr(g_lr, d_lr)
print ('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
def test(self):
"""Translate images using StarGAN trained on a single dataset."""
# Load the trained generator.
self.restore_model(self.test_iters)
# Set data loader.
if self.dataset == 'CelebA':
data_loader = self.celeba_loader
elif self.dataset == 'RaFD':
data_loader = self.rafd_loader
with torch.no_grad():
for i, (x_real, c_org) in enumerate(data_loader):
# Prepare input images and target domain labels.
x_real = x_real.to(self.device)
c_trg_list = self.create_labels(c_org, self.c_dim, self.dataset, self.selected_attrs)
# Translate images.
x_fake_list = [x_real]
for c_trg in c_trg_list:
x_fake_list.append(self.G(x_real, c_trg))
# Save the translated images.
x_concat = torch.cat(x_fake_list, dim=3)
result_path = os.path.join(self.result_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(result_path))
def test_multi(self):
"""Translate images using StarGAN trained on multiple datasets."""
# Load the trained generator.
self.restore_model(self.test_iters)
with torch.no_grad():
for i, (x_real, c_org) in enumerate(self.celeba_loader):
# Prepare input images and target domain labels.
x_real = x_real.to(self.device)
c_celeba_list = self.create_labels(c_org, self.c_dim, 'CelebA', self.selected_attrs)
c_rafd_list = self.create_labels(c_org, self.c2_dim, 'RaFD')
zero_celeba = torch.zeros(x_real.size(0), self.c_dim).to(self.device) # Zero vector for CelebA.
zero_rafd = torch.zeros(x_real.size(0), self.c2_dim).to(self.device) # Zero vector for RaFD.
mask_celeba = self.label2onehot(torch.zeros(x_real.size(0)), 2).to(self.device) # Mask vector: [1, 0].
mask_rafd = self.label2onehot(torch.ones(x_real.size(0)), 2).to(self.device) # Mask vector: [0, 1].
# Translate images.
x_fake_list = [x_real]
for c_celeba in c_celeba_list:
c_trg = torch.cat([c_celeba, zero_rafd, mask_celeba], dim=1)
x_fake_list.append(self.G(x_real, c_trg))
for c_rafd in c_rafd_list:
c_trg = torch.cat([zero_celeba, c_rafd, mask_rafd], dim=1)
x_fake_list.append(self.G(x_real, c_trg))
# Save the translated images.
x_concat = torch.cat(x_fake_list, dim=3)
result_path = os.path.join(self.result_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(result_path))
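# Usage sketch (comments only, not part of the original file): the Solver is
# normally driven by a separate entry script; `config` is assumed to be an
# argparse.Namespace providing the attributes read in __init__ above plus a
# `mode` flag, roughly along these lines:
#
#     solver = Solver(celeba_loader, rafd_loader, config)
#     if config.mode == 'train':
#         solver.train() if config.dataset in ['CelebA', 'RaFD'] else solver.train_multi()
#     else:
#         solver.test() if config.dataset in ['CelebA', 'RaFD'] else solver.test_multi()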
|
the-stack_0_26851
|
# -*- coding: utf-8 -*-
#
# This file is part of the pyFDA project hosted at https://github.com/chipmuenk/pyfda
#
# Copyright © pyFDA Project Contributors
# Licensed under the terms of the MIT License
# (see file LICENSE in root directory for details)
"""
Helper classes and functions for generating and simulating fixpoint filters
"""
import sys
import logging
logger = logging.getLogger(__name__)
#import numpy as np
import pyfda.filterbroker as fb
import pyfda.pyfda_fix_lib as fix
from ..compat import (QWidget, QLabel, QLineEdit, QComboBox, QPushButton, QIcon,
QVBoxLayout, QHBoxLayout, QFrame,
pyqtSignal)
from migen import Cat, If, Replicate, Signal
from pyfda.pyfda_qt_lib import qget_cmb_box, qset_cmb_box
from pyfda.pyfda_rc import params
from pyfda.pyfda_lib import qstr, safe_eval, to_html
def rescale(mod, sig_i, QI, QO):
"""
Change word length of input signal `sig_i` to `WO` bits, using the
quantization and saturation methods specified by ``QO['quant']`` and
``QO['ovfl']``.
Parameters
----------
mod: Module (migen)
instance of migen module
sig_i: Signal (migen)
Signal to be requantized
QI: dict
Quantization dict for input word, only the keys 'WI' and 'WF' for integer
and fractional wordlength are evaluated. QI['WI'] = 2 and QI['WF'] = 13
e.g. define Q-Format '2.13' with 2 integer, 13 fractional bits and 1 implied
sign bit = 16 bits total.
QO: dict
Quantization dict for output word format; the keys 'WI' and 'WF' for
integer and fractional wordlength are evaluated as well as the keys 'quant'
and 'ovfl' describing requantization and overflow behaviour.
**Input and output word are aligned at their binary points.**
The following shows an example of rescaling an input word from Q2.4 to Q0.3
using wrap-around and truncation. It's easy to see that for simple wrap-around
logic, the sign of the result may change.
::
S | WI1 | WI0 * WF0 | WF1 | WF2 | WF3 : WI = 2, WF = 4, W = 7
0 | 1 | 0 * 1 | 0 | 1 | 1 = 43 (dec) or 43/16 = 2 + 11/16 (float)
*
| S * WF0 | WF1 | WF2 : WI = 0, WF = 3, W = 4
    0 * 1 | 0 | 1 = 5 (dec) or 5/8 (float)
The float or "real (world) value" is calculated by multiplying the integer
value by 2 ** (-WF).
For requantizing two numbers to the same WI and WF, imagine both binary numbers
to be right-aligned. Changes in the number of integer bits `dWI` and fractional
bits `dWF` are handled separately.
Fractional Bits:
- For reducing the number of fractional bits by `dWF`, simply right-shift the
integer number by `dWF`. For rounding, add '1' to the bit below the truncation
point before right-shifting.
- Extend the number of fractional bits by left-shifting the integer by `dWF`,
LSB's are filled with zeros.
Integer Bits:
    - For reducing the number of integer bits by `dWI`, the left-most (MSB) bits
    are discarded; depending on ``QO['ovfl']`` this is done with wrap-around or
    saturation logic.
    - For increasing the number of integer bits, the word is SIGN-EXTENDED by
    filling up the left-most bits with the sign bit.
"""
WI_I = QI['WI']
WI_F = QI['WF']
WI = WI_I + WI_F + 1
WO_I = QO['WI']
WO_F = QO['WF']
WO = WO_I + WO_F + 1
dWF = WI_F - WO_F # difference of fractional lengths
dWI = WI_I - WO_I # difference of integer lengths
    # minimum and maximum representable output values
MIN_o = - 1 << (WO - 1)
MAX_o = -MIN_o - 1
sig_i_q = Signal((max(WI,WO), True))
sig_o = Signal((WO, True))
logger.debug("rescale: dWI={0}, dWF={1}, QU:{2}, OV:{3}".format(dWI, dWF, QO['quant'], QO['ovfl'] ))
    if dWF <= 0: # output word has more fractional bits, extend the input fraction
        mod.comb += sig_i_q.eq(sig_i << -dWF) # shift input left by -dWF, zero-fill LSBs
else: # dWF > 0, fractional output word length needs to be shortened
if QO['quant'] == 'round':
            # add half an LSB (1 << (dWF - 1)) and divide by 2^dWF (shift right by dWF)
mod.comb += sig_i_q.eq((sig_i + (1 << (dWF - 1))) >> dWF)
        elif QO['quant'] == 'floor': # just divide by 2^dWF (shift right by dWF)
mod.comb += sig_i_q.eq(sig_i >> dWF)
elif QO['quant'] == 'fix':
            # add sign bit as LSB (1 << dWF) and divide by 2^dWF (shift right by dWF)
mod.comb += sig_i_q.eq((sig_i + (sig_i[-1] << dWF)) >> dWF)
else:
            raise Exception(u'Unknown quantization method "%s"!' % (QO['quant']))
if dWI < 0: # WI_I < WO_I, sign extend integer part
#mod.comb += sig_o.eq(sig_i_q >> -dWI)
mod.comb += sig_o.eq(Cat(sig_i_q, Replicate(sig_i_q[-1], -dWI)))
elif dWI == 0: # WI = WO, don't change integer part
mod.comb += sig_o.eq(sig_i_q)
elif QO['ovfl'] == 'sat':
mod.comb += \
If(sig_i_q[-1] == 1,
If(sig_i_q < MIN_o,
#If(sig_i_q[-dWI-1:-1] != Replicate(sig_i_q[-1], dWI),
sig_o.eq(MIN_o)
).Else(sig_o.eq(sig_i_q))#[:-dWI-1]))# >> dWI
).Elif(sig_i_q > MAX_o,
#).Elif(sig_i_q[WO-1:] == 0b01,
sig_o.eq(MAX_o)
).Else(sig_o.eq(sig_i_q)#[:-dWI-1])# >> dWI)
)
elif QO['ovfl'] == 'wrap': # wrap around (shift left)
mod.comb += sig_o.eq(sig_i_q)# >> dWI)
#mod.comb += sig_o.eq(Replicate(sig_i_q[-1], abs(dWI)))
#mod.comb += sig_o.eq(sig_i_q[-dWI-1:-1])
# =============================================================================
# If(sig_i_q[-1] == 1,
# If(sig_i_q[-1:-dWI-1] != Replicate(sig_i_q[-1], dWI),
# #If(sig_i_q < MIN_o,
# #If(sig_i_q[WO-1:] == 0b10,
# sig_o.eq(MIN_o)
# ).Else(sig_o.eq(sig_i_q)# >> dWI
# ).Elif(sig_i_q > MAX_o,
# #).Elif(sig_i_q[WO-1:] == 0b01,
# sig_o.eq(MAX_o)
# ).Else(sig_o.eq(sig_i_q)# >> dWI)
# )
# =============================================================================
else:
        raise Exception(u'Unknown overflow method "%s"!' % (QO['ovfl']))
return sig_o
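# The helper below is only an illustrative, pure-Python sketch of the integer
# arithmetic that rescale() expresses with migen statements; it is not used by
# pyfda and the name `_requant_int_demo` is made up for this example. It mirrors
# the 'round' / 'floor' quantization and 'wrap' / 'sat' overflow branches for a
# signed integer `x` given in Q(WI_I).(WI_F) format.
def _requant_int_demo(x, WI_I, WI_F, WO_I, WO_F, quant='floor', ovfl='wrap'):
    dWF = WI_F - WO_F                  # fractional bits to drop (> 0) or add (< 0)
    WO = WO_I + WO_F + 1               # output word length incl. sign bit
    MIN_o = -(1 << (WO - 1))
    MAX_o = (1 << (WO - 1)) - 1
    if dWF <= 0:                       # extend the fraction: shift left, zero-fill LSBs
        y = x << -dWF
    elif quant == 'round':             # add half an LSB, then shift right
        y = (x + (1 << (dWF - 1))) >> dWF
    else:                              # 'floor': truncate by shifting right
        y = x >> dWF
    if ovfl == 'sat':                  # clamp to the representable output range
        return max(MIN_o, min(MAX_o, y))
    y &= (1 << WO) - 1                 # 'wrap': keep only the low WO bits ...
    return y - (1 << WO) if y & (1 << (WO - 1)) else y  # ... reinterpreted as signed
# Example from the docstring above: requantizing 43 (Q2.4, i.e. 2 + 11/16) to Q0.3
# with floor/wrap keeps the bits '101': _requant_int_demo(43, 2, 4, 0, 3) == 5,
# i.e. 5/8, while ovfl='sat' clamps the truncated value 21 to MAX_o == 7.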
#------------------------------------------------------------------------------
class UI_W(QWidget):
"""
Widget for entering integer and fractional bits. The result can be read out
via the attributes `self.WI`, `self.WF` and `self.W`.
The constructor accepts a dictionary for initial widget settings.
The following keys are defined; default values are used for missing keys:
'label' : 'WI.WF' # widget label
'lbl_sep' : '.' # label between WI and WF field
'max_led_width' : 30 # max. length of lineedit field
    'WI' : 0 # number of integer *bits*
'WI_len' : 2 # max. number of integer *digits*
'tip_WI' : 'Number of integer bits' # Mouse-over tooltip
'WF' : 15 # number of frac. *bits*
'WF_len' : 2 # max. number of frac. *digits*
'tip_WF' : 'Number of frac. bits' # Mouse-over tooltip
'enabled' : True # Is widget enabled?
'visible' : True # Is widget visible?
'fractional' : True # Display WF, otherwise WF=0
'lock_visible' : False # Pushbutton for locking visible
'tip_lock' : 'Lock input/output quant.'# Tooltip for lock push button
"""
# incoming,
#sig_rx = pyqtSignal(object)
    # outgoing
sig_tx = pyqtSignal(object)
def __init__(self, parent, q_dict, **kwargs):
super(UI_W, self).__init__(parent)
self.q_dict = q_dict # pass a dict with initial settings for construction
#logger.warning(self.q_dict)
self._construct_UI(**kwargs)
self.ui2dict() # initialize the class attributes
def _construct_UI(self, **kwargs):
"""
Construct widget from quantization dict, individual settings and
the default dict below """
# default settings
dict_ui = {'label':'WI.WF', 'lbl_sep':'.', 'max_led_width':30,
'WI':0, 'WI_len':2, 'tip_WI':'Number of integer bits',
'WF':15,'WF_len':2, 'tip_WF':'Number of fractional bits',
'enabled':True, 'visible':True, 'fractional':True,
'combo_visible':False, 'combo_items':['auto', 'full', 'man'],
'tip_combo':'Calculate Acc. width.',
'lock_visible':False, 'tip_lock':'Lock input/output quantization.'
} #: default values
if self.q_dict:
dict_ui.update(self.q_dict)
for k, v in kwargs.items():
if k not in dict_ui:
logger.warning("Unknown key {0}".format(k))
else:
dict_ui.update({k:v})
if not dict_ui['fractional']:
dict_ui['WF'] = 0
self.WI = dict_ui['WI']
self.WF = dict_ui['WF']
self.W = int(self.WI + self.WF + 1)
if self.q_dict:
self.q_dict.update({'WI':self.WI, 'WF':self.WF, 'W':self.W})
else:
self.q_dict = {'WI':self.WI, 'WF':self.WF, 'W':self.W}
lblW = QLabel(to_html(dict_ui['label'], frmt='bi'), self)
self.cmbW = QComboBox(self)
self.cmbW.addItems(dict_ui['combo_items'])
self.cmbW.setVisible(dict_ui['combo_visible'])
self.cmbW.setToolTip(dict_ui['tip_combo'])
self.cmbW.setObjectName("cmbW")
self.butLock = QPushButton(self)
self.butLock.setCheckable(True)
self.butLock.setChecked(False)
self.butLock.setVisible(dict_ui['lock_visible'])
self.butLock.setToolTip(dict_ui['tip_lock'])
self.ledWI = QLineEdit(self)
self.ledWI.setToolTip(dict_ui['tip_WI'])
self.ledWI.setMaxLength(dict_ui['WI_len']) # maximum of 2 digits
self.ledWI.setFixedWidth(dict_ui['max_led_width']) # width of lineedit in points(?)
self.ledWI.setObjectName("WI")
lblDot = QLabel(dict_ui['lbl_sep'], self)
lblDot.setVisible(dict_ui['fractional'])
self.ledWF = QLineEdit(self)
self.ledWF.setToolTip(dict_ui['tip_WF'])
        self.ledWF.setMaxLength(dict_ui['WF_len']) # maximum of 2 digits
self.ledWF.setFixedWidth(dict_ui['max_led_width']) # width of lineedit in points(?)
self.ledWF.setVisible(dict_ui['fractional'])
self.ledWF.setObjectName("WF")
layH = QHBoxLayout()
layH.addWidget(lblW)
layH.addStretch()
layH.addWidget(self.cmbW)
layH.addWidget(self.butLock)
layH.addWidget(self.ledWI)
layH.addWidget(lblDot)
layH.addWidget(self.ledWF)
layH.setContentsMargins(0,0,0,0)
frmMain = QFrame(self)
frmMain.setLayout(layH)
layVMain = QVBoxLayout() # Widget main layout
layVMain.addWidget(frmMain)
layVMain.setContentsMargins(0,5,0,0)#*params['wdg_margins'])
self.setLayout(layVMain)
#----------------------------------------------------------------------
# INITIAL SETTINGS
#----------------------------------------------------------------------
self.ledWI.setText(qstr(dict_ui['WI']))
self.ledWF.setText(qstr(dict_ui['WF']))
frmMain.setEnabled(dict_ui['enabled'])
frmMain.setVisible(dict_ui['visible'])
#----------------------------------------------------------------------
# LOCAL SIGNALS & SLOTs
#----------------------------------------------------------------------
self.ledWI.editingFinished.connect(self.ui2dict)
self.ledWF.editingFinished.connect(self.ui2dict)
self.butLock.clicked.connect(self.but_clicked)
self.cmbW.currentIndexChanged.connect(self.ui2dict)
# initialize button icon
self.but_clicked(self.butLock.isChecked())
def quant_coeffs(self, q_dict, coeffs):
"""
Quantize the coefficients, scale and convert them to integer and return them
as a list of integers
This is called every time one of the coefficient subwidgets is edited or changed.
        Parameters:
        -----------
        q_dict: dict
            Dictionary with quantization settings ('WI', 'WF', 'quant', 'ovfl', ...)
            used to construct the fixpoint quantizer
        coeffs: array-like of float
            Floating point coefficients to be quantized
        Returns:
        --------
        A list of integer coefficients, quantized and scaled with the settings
        of the passed quantization dict
"""
# Create coefficient quantizer instances using the quantization parameters dict
# collected in `input_widgets/input_coeffs.py` (and stored in the central filter dict)
Q_coeff = fix.Fixed(q_dict)
Q_coeff.frmt = 'dec' # always use decimal format for coefficients
        if coeffs is None:
            logger.error("Coeffs empty!")
            return []
# quantize floating point coefficients and convert them to the
# selected numeric format (hex, bin, dec ...) with the selected scale (WI.WF),
# next convert array float -> array of fixp - > list of int (scaled by 2^WF)
return list(Q_coeff.float2frmt(coeffs) * (1 << Q_coeff.WF))
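    # Rough numerical example (comment only; the exact result depends on the
    # settings in q_dict and on pyfda's fixpoint library): with 'WI' = 0, 'WF' = 15
    # and frmt 'dec', a float coefficient of 0.5 is scaled by 2 ** 15, i.e. it
    # appears as the integer 16384 in the returned list.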
#--------------------------------------------------------------------------
def but_clicked(self, clicked):
"""
Update the icon of the push button depending on its state
"""
if clicked:
self.butLock.setIcon(QIcon(':/lock-locked.svg'))
else:
self.butLock.setIcon(QIcon(':/lock-unlocked.svg'))
        q_icon_size = self.butLock.iconSize() # replace by a fixed QSize() for manual sizing
self.butLock.setIconSize(q_icon_size)
dict_sig = {'sender':__name__, 'ui':'butLock'}
self.sig_tx.emit(dict_sig)
#--------------------------------------------------------------------------
def ui2dict(self):
"""
Update the attributes `self.WI`, `self.WF` and `self.W` and `self.q_dict`
when one of the QLineEdit widgets has been edited.
Emit a signal with `{'ui':objectName of the sender}`.
"""
self.WI = int(safe_eval(self.ledWI.text(), self.WI, return_type="int", sign='pos'))
self.ledWI.setText(qstr(self.WI))
self.WF = int(safe_eval(self.ledWF.text(), self.WF, return_type="int", sign='pos'))
self.ledWF.setText(qstr(self.WF))
self.W = int(self.WI + self.WF + 1)
self.q_dict.update({'WI':self.WI, 'WF':self.WF, 'W':self.W})
if self.sender():
name = self.sender().objectName()
logger.debug("sender: {0}".format(name))
dict_sig = {'sender':__name__, 'ui':name}
self.sig_tx.emit(dict_sig)
else:
logger.error("sender without name, shouldn't happen!")
#--------------------------------------------------------------------------
def dict2ui(self, q_dict=None):
"""
Update the widgets `WI` and `WF` and the corresponding attributes
from the dict passed as the argument
"""
if q_dict is None:
q_dict = self.q_dict
if 'WI' in q_dict:
self.WI = safe_eval(q_dict['WI'], self.WI, return_type="int", sign='pos')
self.ledWI.setText(qstr(self.WI))
else:
logger.warning("No key 'WI' in dict!")
if 'WF' in q_dict:
self.WF = safe_eval(q_dict['WF'], self.WF, return_type="int", sign='pos')
self.ledWF.setText(qstr(self.WF))
else:
logger.warning("No key 'WF' in dict!")
self.W = self.WF + self.WI + 1
#------------------------------------------------------------------------------
#
#==============================================================================
class UI_Q(QWidget):
"""
Widget for selecting quantization / overflow options. The result can be read out
via the attributes `self.ovfl` and `self.quant`.
The constructor accepts a reference to the quantization dictionary for
initial widget settings and for (re-)storing values.
The following keys are defined; default values are used for missing keys:
'label_q' : 'Quant.' # widget label
'tip_q' : 'Select the kind of quantization.' # Mouse-over tooltip
'enabled' : True # Is widget enabled?
'visible' : True # Is widget visible?
"""
# incoming,
#sig_rx = pyqtSignal(object)
    # outgoing
sig_tx = pyqtSignal(object)
def __init__(self, parent, q_dict, **kwargs):
super(UI_Q, self).__init__(parent)
self.q_dict = q_dict
self._construct_UI(**kwargs)
def _construct_UI(self, **kwargs):
""" Construct widget """
dict_ui = {'label':'',
'label_q':'Quant.', 'tip_q':'Select the kind of quantization.',
'cmb_q':['round', 'fix', 'floor'], 'cur_q':'round',
'label_ov':'Ovfl.', 'tip_ov':'Select overflow behaviour.',
'cmb_ov':['wrap', 'sat'], 'cur_ov':'wrap',
'enabled':True, 'visible':True
} #: default widget settings
if 'quant' in self.q_dict and self.q_dict['quant'] in dict_ui['cmb_q']:
dict_ui['cur_q'] = self.q_dict['quant']
if 'ovfl' in self.q_dict and self.q_dict['ovfl'] in dict_ui['cmb_ov']:
dict_ui['cur_ov'] = self.q_dict['ovfl']
for key, val in kwargs.items():
dict_ui.update({key:val})
# dict_ui.update(map(kwargs)) # same as above?
lblQuant = QLabel(dict_ui['label_q'], self)
self.cmbQuant = QComboBox(self)
self.cmbQuant.addItems(dict_ui['cmb_q'])
qset_cmb_box(self.cmbQuant, dict_ui['cur_q'])
self.cmbQuant.setToolTip(dict_ui['tip_q'])
self.cmbQuant.setObjectName('quant')
lblOvfl = QLabel(dict_ui['label_ov'], self)
self.cmbOvfl = QComboBox(self)
self.cmbOvfl.addItems(dict_ui['cmb_ov'])
qset_cmb_box(self.cmbOvfl, dict_ui['cur_ov'])
self.cmbOvfl.setToolTip(dict_ui['tip_ov'])
self.cmbOvfl.setObjectName('ovfl')
# ComboBox size is adjusted automatically to fit the longest element
self.cmbQuant.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.cmbOvfl.setSizeAdjustPolicy(QComboBox.AdjustToContents)
layH = QHBoxLayout()
if dict_ui['label'] != "":
lblW = QLabel(to_html(dict_ui['label'], frmt='bi'), self)
layH.addWidget(lblW)
layH.addStretch()
layH.addWidget(lblOvfl)
layH.addWidget(self.cmbOvfl)
#layH.addStretch(1)
layH.addWidget(lblQuant)
layH.addWidget(self.cmbQuant)
layH.setContentsMargins(0,0,0,0)
frmMain = QFrame(self)
frmMain.setLayout(layH)
layVMain = QVBoxLayout() # Widget main layout
layVMain.addWidget(frmMain)
layVMain.setContentsMargins(0,0,0,0)#*params['wdg_margins'])
self.setLayout(layVMain)
#----------------------------------------------------------------------
# INITIAL SETTINGS
#----------------------------------------------------------------------
self.ovfl = qget_cmb_box(self.cmbOvfl, data=False)
self.quant = qget_cmb_box(self.cmbQuant, data=False)
frmMain.setEnabled(dict_ui['enabled'])
frmMain.setVisible(dict_ui['visible'])
#----------------------------------------------------------------------
# LOCAL SIGNALS & SLOTs
#----------------------------------------------------------------------
self.cmbOvfl.currentIndexChanged.connect(self.ui2dict)
self.cmbQuant.currentIndexChanged.connect(self.ui2dict)
#--------------------------------------------------------------------------
def ui2dict(self):
"""
Update the quantization dict and the attributes `self.ovfl` and
`self.quant` from the UI
"""
self.ovfl = self.cmbOvfl.currentText()
self.quant = self.cmbQuant.currentText()
self.q_dict.update({'ovfl': self.ovfl,
'quant': self.quant})
if self.sender():
name = self.sender().objectName()
dict_sig = {'sender':__name__, 'ui':name}
self.sig_tx.emit(dict_sig)
#--------------------------------------------------------------------------
def dict2ui(self, q_dict):
""" Update UI from passed dictionary """
pass
#==============================================================================
if __name__ == '__main__':
from ..compat import QApplication
app = QApplication(sys.argv)
    mainw = UI_W(None, {})
mainw.show()
app.exec_()
|
the-stack_0_26852
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 384
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.embedding_size = 128
self.head_ratio = 2
self.conv_kernel_size = 9
self.num_groups = 1
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = ConvBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
return_dict=True,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFConvBertModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFConvBertForMaskedLM(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFConvBertForSequenceClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = TFConvBertForMultipleChoice(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFConvBertForTokenClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFConvBertForQuestionAnswering(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFConvBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output_hidden_states = outputs["encoder_hidden_states"]
output_attentions = outputs["encoder_attentions"]
else:
output_hidden_states = outputs["hidden_states"]
output_attentions = outputs["attentions"]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(output_hidden_states), expected_num_layers)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
@slow
def test_model_from_pretrained(self):
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(out_len % 2, 0)
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
expected_shape = [1, 6, 768]
self.assertEqual(output.shape, expected_shape)
print(output[:, :3, :3])
expected_slice = tf.constant(
[
[
[-0.10334751, -0.37152207, -0.2682219],
[0.20078957, -0.3918426, -0.78811496],
[0.08000169, -0.509474, -0.59314483],
]
]
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
|
the-stack_0_26855
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'postfix', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base'
setup(
name='datadog-postfix',
version=ABOUT['__version__'],
description='The Postfix check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent postfix check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.postfix'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
|
the-stack_0_26856
|
#!/usr/bin/python
# Copyright(C) 2012 Open Information Security Foundation
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
try:
import simplejson as json
except ImportError:
import json
import re
import readline
from socket import socket, AF_UNIX, error
from time import sleep
import select
import sys
SURICATASC_VERSION = "0.9"
VERSION = "0.1"
SIZE = 4096
class SuricataException(Exception):
"""
Generic class for suricatasc exception
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
class SuricataNetException(SuricataException):
"""
Exception raised when network error occur.
"""
pass
class SuricataCommandException(SuricataException):
"""
Exception raised when command is not correct.
"""
pass
class SuricataReturnException(SuricataException):
"""
Exception raised when return message is not correct.
"""
pass
class SuricataCompleter:
def __init__(self, words):
self.words = words
self.generator = None
def complete(self, text):
for word in self.words:
if word.startswith(text):
yield word
def __call__(self, text, state):
if state == 0:
self.generator = self.complete(text)
try:
return next(self.generator)
except StopIteration:
return None
return None
class SuricataSC:
def __init__(self, sck_path, verbose=False):
self.cmd_list=['shutdown','quit','pcap-file','pcap-file-number','pcap-file-list','iface-list','iface-stat','register-tenant','unregister-tenant','register-tenant-handler','unregister-tenant-handler']
self.sck_path = sck_path
self.verbose = verbose
def json_recv(self):
cmdret = None
i = 0
data = ""
while i < 5:
i += 1
if sys.version < '3':
data += self.socket.recv(SIZE)
else:
data += self.socket.recv(SIZE).decode('iso-8859-1')
try:
cmdret = json.loads(data)
break
            except ValueError:  # incomplete JSON so far, wait and retry
sleep(0.3)
return cmdret
def send_command(self, command, arguments = None):
if command not in self.cmd_list and command != 'command-list':
            raise SuricataCommandException("No such command: %s" % command)
cmdmsg = {}
cmdmsg['command'] = command
if (arguments != None):
cmdmsg['arguments'] = arguments
if self.verbose:
print("SND: " + json.dumps(cmdmsg))
if sys.version < '3':
self.socket.send(json.dumps(cmdmsg))
else:
self.socket.send(bytes(json.dumps(cmdmsg), 'iso-8859-1'))
ready = select.select([self.socket], [], [], 600)
if ready[0]:
cmdret = self.json_recv()
else:
cmdret = None
if cmdret == None:
raise SuricataReturnException("Unable to get message from server")
if self.verbose:
print("RCV: "+ json.dumps(cmdret))
return cmdret
def connect(self):
try:
self.socket = socket(AF_UNIX)
self.socket.connect(self.sck_path)
except error as err:
raise SuricataNetException(err)
self.socket.settimeout(10)
#send version
if self.verbose:
print("SND: " + json.dumps({"version": VERSION}))
if sys.version < '3':
self.socket.send(json.dumps({"version": VERSION}))
else:
self.socket.send(bytes(json.dumps({"version": VERSION}), 'iso-8859-1'))
ready = select.select([self.socket], [], [], 600)
if ready[0]:
cmdret = self.json_recv()
else:
cmdret = None
if cmdret == None:
raise SuricataReturnException("Unable to get message from server")
if self.verbose:
print("RCV: "+ json.dumps(cmdret))
if cmdret["return"] == "NOK":
raise SuricataReturnException("Error: %s" % (cmdret["message"]))
cmdret = self.send_command("command-list")
# we silently ignore NOK as this means server is old
if cmdret["return"] == "OK":
self.cmd_list = cmdret["message"]["commands"]
self.cmd_list.append("quit")
def close(self):
self.socket.close()
def parse_command(self, command):
arguments = None
if command.split(' ', 2)[0] in self.cmd_list:
if "pcap-file " in command:
try:
parts = command.split(' ');
except:
raise SuricataCommandException("Arguments to command '%s' is missing" % (command))
cmd, filename, output = parts[0], parts[1], parts[2]
tenant = None
if len(parts) > 3:
tenant = parts[3]
if cmd != "pcap-file":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["filename"] = filename
arguments["output-dir"] = output
if tenant != None:
arguments["tenant"] = int(tenant)
elif "iface-stat" in command:
try:
[cmd, iface] = command.split(' ', 1)
except:
raise SuricataCommandException("Unable to split command '%s'" % (command))
if cmd != "iface-stat":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["iface"] = iface
elif "conf-get" in command:
try:
[cmd, variable] = command.split(' ', 1)
except:
raise SuricataCommandException("Unable to split command '%s'" % (command))
if cmd != "conf-get":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["variable"] = variable
elif "unregister-tenant-handler" in command:
try:
parts = command.split(' ')
except:
raise SuricataCommandException("Arguments to command '%s' is missing" % (command))
cmd, tenantid, htype = parts[0], parts[1], parts[2]
hargs = None
if len(parts) > 3:
hargs = parts[3]
if cmd != "unregister-tenant-handler":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["id"] = int(tenantid)
arguments["htype"] = htype
                    if hargs is not None:
arguments["hargs"] = int(hargs)
elif "register-tenant-handler" in command:
                try:
                    parts = command.split(' ')
                    cmd, tenantid, htype = parts[0], parts[1], parts[2]
                except IndexError:
                    raise SuricataCommandException("Arguments to command '%s' are missing" % (command))
hargs = None
if len(parts) > 3:
hargs = parts[3]
if cmd != "register-tenant-handler":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["id"] = int(tenantid)
arguments["htype"] = htype
                    if hargs is not None:
arguments["hargs"] = int(hargs)
elif "unregister-tenant" in command:
try:
[cmd, tenantid] = command.split(' ', 1)
                except ValueError:
raise SuricataCommandException("Unable to split command '%s'" % (command))
if cmd != "unregister-tenant":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["id"] = int(tenantid)
elif "register-tenant" in command:
try:
[cmd, tenantid, filename] = command.split(' ', 2)
                except ValueError:
                    raise SuricataCommandException("Arguments to command '%s' are missing" % (command))
if cmd != "register-tenant":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["id"] = int(tenantid)
arguments["filename"] = filename
elif "reload-tenant" in command:
try:
[cmd, tenantid, filename] = command.split(' ', 2)
                except ValueError:
                    raise SuricataCommandException("Arguments to command '%s' are missing" % (command))
if cmd != "reload-tenant":
raise SuricataCommandException("Invalid command '%s'" % (command))
else:
arguments = {}
arguments["id"] = int(tenantid)
arguments["filename"] = filename
else:
cmd = command
else:
raise SuricataCommandException("Unknown command '%s'" % (command))
return (cmd, arguments)
def interactive(self):
print("Command list: " + ", ".join(self.cmd_list))
try:
readline.set_completer(SuricataCompleter(self.cmd_list))
readline.set_completer_delims(";")
readline.parse_and_bind('tab: complete')
while True:
if sys.version < '3':
command = raw_input(">>> ").strip()
else:
command = input(">>> ").strip()
if command == "quit":
                    break
try:
(cmd, arguments) = self.parse_command(command)
except SuricataCommandException as err:
print(err)
continue
cmdret = self.send_command(cmd, arguments)
#decode json message
if cmdret["return"] == "NOK":
print("Error:")
print(json.dumps(cmdret["message"], sort_keys=True, indent=4, separators=(',', ': ')))
else:
print("Success:")
print(json.dumps(cmdret["message"], sort_keys=True, indent=4, separators=(',', ': ')))
except KeyboardInterrupt:
print("[!] Interrupted")
|
the-stack_0_26857
|
#!/usr/bin/python3
"""Training and Validation On Classification Task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import random
import shutil
import argparse
import importlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(BASE_DIR, '..'))
DATA_DIR = os.path.join(ROOT_DIR, '../../../../')
import data_utils
import numpy as np
import pointfly as pf
import tensorflow as tf
from datetime import datetime
import provider
import h5py
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=1, help='GPU to use [default: 1]')
parser.add_argument('--load_ckpt', '-l', help='Path to a check point file for load')
parser.add_argument('--log_dir', '-s', default='log/', help='Path to folder for saving check points and summary')
parser.add_argument('--with_bg', default = True, help='Whether to have background or not [default: True]')
parser.add_argument('--norm', default = True, help='Whether to normalize data or not [default: True]')
parser.add_argument('--center_data', default = True, help='Whether to explicitly center the data [default: True]')
parser.add_argument('--seg_weight', type=float, default=0.5, help='Segmentation weight in loss')
parser.add_argument('--train_file', default = 'h5_files/main_split/training_objectdataset_augmentedrot_scale75.h5', help='Location of training file')
parser.add_argument('--test_file', default = 'h5_files/main_split/test_objectdataset_augmentedrot_scale75.h5', help='Location of test file')
parser.add_argument('--model', '-m', default = 'pointcnn_seg', help='Model to use')
parser.add_argument('--setting', '-x', default = 'object_dataset_x3', help='Setting to use')
parser.add_argument('--epochs', help='Number of training epochs (default defined in setting)', type=int)
parser.add_argument('--batch_size', help='Batch size (default defined in setting)', type=int)
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
args = parser.parse_args()
GPU_INDEX = args.gpu
time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
root_folder = args.log_dir
if not os.path.exists(root_folder):
os.makedirs(root_folder)
WITH_BG = args.with_bg
NORMALIZED = args.norm
TRAIN_FILE = args.train_file
TEST_FILE = args.test_file
CENTER_DATA = args.center_data
SEG_WEIGHT = args.seg_weight
LOG_FOUT = open(os.path.join(root_folder, 'log_train.txt'), 'w')
LOG_FOUT.write(str(args)+'\n')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
model = importlib.import_module(args.model)
setting_path = os.path.join(os.path.dirname(__file__), args.model)
sys.path.append(setting_path)
setting = importlib.import_module(args.setting)
num_epochs = args.epochs or setting.num_epochs
batch_size = args.batch_size or setting.batch_size
sample_num = args.num_point
step_val = setting.step_val
rotation_range = setting.rotation_range
rotation_range_val = setting.rotation_range_val
scaling_range = setting.scaling_range
scaling_range_val = setting.scaling_range_val
jitter = setting.jitter
jitter_val = setting.jitter_val
pool_setting_val = None if not hasattr(setting, 'pool_setting_val') else setting.pool_setting_val
pool_setting_train = None if not hasattr(setting, 'pool_setting_train') else setting.pool_setting_train
# Prepare inputs
log_string('{}-Preparing datasets...'.format(datetime.now()))
NUM_CLASSES = 15
print("Normalized: "+str(NORMALIZED))
print("Center Data: "+str(CENTER_DATA))
TRAIN_DATA, TRAIN_LABELS, TRAIN_MASKS = data_utils.load_withmask_h5(TRAIN_FILE)
TEST_DATA, TEST_LABELS, TEST_MASKS = data_utils.load_withmask_h5(TEST_FILE)
TRAIN_MASKS = data_utils.convert_to_binary_mask(TRAIN_MASKS)
TEST_MASKS = data_utils.convert_to_binary_mask(TEST_MASKS)
if (CENTER_DATA):
TRAIN_DATA = data_utils.center_data(TRAIN_DATA)
TEST_DATA = data_utils.center_data(TEST_DATA)
if (NORMALIZED):
TRAIN_DATA = data_utils.normalize_data(TRAIN_DATA)
TEST_DATA = data_utils.normalize_data(TEST_DATA)
num_train = len(TRAIN_DATA)
num_val = len(TEST_DATA)
print('{}-{:d}/{:d} training/validation samples.'.format(datetime.now(), num_train, num_val))
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
# Placeholders
xforms = tf.placeholder(tf.float32, shape=(None, 3, 3), name="xforms")
rotations = tf.placeholder(tf.float32, shape=(None, 3, 3), name="rotations")
jitter_range = tf.placeholder(tf.float32, shape=(1), name="jitter_range")
global_step = tf.Variable(0, trainable=False, name='global_step')
is_training_pl = tf.placeholder(tf.bool, name='is_training')
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, sample_num, 3), name='data')
labels_pl = tf.placeholder(tf.int32, shape=(batch_size), name='label')
masks_pl = tf.placeholder(tf.int32, shape=(batch_size, sample_num), name='mask')
points_augmented = pf.augment(pointclouds_pl, xforms, jitter_range)
net = model.Net(points=points_augmented, features=None, is_training=is_training_pl, setting=setting)
classification_logits = net.classification_logits
segmentation_logits = net.segmentation_logits
probs = tf.nn.softmax(classification_logits, name='probs')
predictions = tf.argmax(probs, axis=-1, name='predictions')
##classification loss
labels_2d = tf.expand_dims(labels_pl, axis=-1, name='labels_2d')
labels_tile = tf.tile(labels_2d, (1, tf.shape(classification_logits)[1]), name='labels_tile')
classify_loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=labels_tile, logits=classification_logits))
##segmentation loss
per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=segmentation_logits, labels=masks_pl), axis=1)
seg_loss = tf.reduce_mean(per_instance_seg_loss)
loss_op = (1-SEG_WEIGHT)*classify_loss + SEG_WEIGHT*seg_loss
tf.summary.scalar('total loss', loss_op)
tf.summary.scalar('classify_loss', classify_loss)
tf.summary.scalar('seg_loss', seg_loss)
lr_exp_op = tf.train.exponential_decay(setting.learning_rate_base, global_step, setting.decay_steps,
setting.decay_rate, staircase=True)
lr_clip_op = tf.maximum(lr_exp_op, setting.learning_rate_min)
_ = tf.summary.scalar('learning_rate', tensor=lr_clip_op, collections=['train'])
reg_loss = setting.weight_decay * tf.losses.get_regularization_loss()
if setting.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=lr_clip_op, epsilon=setting.epsilon)
elif setting.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate=lr_clip_op, momentum=setting.momentum, use_nesterov=True)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss_op + reg_loss, global_step=global_step)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
# backup all code
# code_folder = os.path.abspath(os.path.dirname(__file__))
# shutil.copytree(code_folder, os.path.join(root_folder)
folder_ckpt = root_folder
# if not os.path.exists(folder_ckpt):
# os.makedirs(folder_ckpt)
folder_summary = os.path.join(root_folder, 'summary')
if not os.path.exists(folder_summary):
os.makedirs(folder_summary)
parameter_num = np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])
print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num))
sess.run(init_op)
# saver.restore(sess, os.path.join(folder_ckpt, "model.ckpt"))
# log_string("Model restored.")
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(folder_summary, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(folder_summary, 'test'))
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'masks_pl': masks_pl,
'is_training_pl': is_training_pl,
'pred': probs,
'seg_pred': segmentation_logits,
'loss': loss_op,
'classify_loss': classify_loss,
'seg_loss': seg_loss,
'train_op': train_op,
'merged': merged,
'step': global_step,
'xforms': xforms,
'rotations': rotations,
'jitter_range': jitter_range}
for epoch in range(num_epochs):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
# if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(folder_ckpt, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer):
is_training = True
current_data, current_label, current_mask = data_utils.get_current_data_withmask_h5(TRAIN_DATA, TRAIN_LABELS, TRAIN_MASKS, sample_num)
current_label = np.squeeze(current_label)
current_mask = np.squeeze(current_mask)
num_batches = current_data.shape[0]//batch_size
total_correct = 0
total_seen = 0
loss_sum = 0
total_correct_seg = 0
classify_loss_sum = 0
seg_loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * batch_size
end_idx = (batch_idx+1) * batch_size
xforms_np, rotations_np = pf.get_xforms(batch_size,
rotation_range=rotation_range,
scaling_range=scaling_range,
order=setting.rotation_order)
# Augment batched point clouds by rotation and jittering
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['masks_pl']: current_mask[start_idx:end_idx],
ops['is_training_pl']: is_training,
ops['xforms']: xforms_np,
ops['rotations']: rotations_np,
ops['jitter_range']: np.array([jitter])}
summary, step, _, loss_val, pred_val, seg_val, classify_loss, seg_loss = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred'], ops['seg_pred'], ops['classify_loss'], ops['seg_loss']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.sum(pred_val, axis=1)
pred_val = np.argmax(pred_val, 1)
# print(pred_val)
# print(current_label[start_idx:end_idx])
correct = np.sum(pred_val == current_label[start_idx:end_idx])
seg_val = np.argmax(seg_val, 2)
seg_correct = np.sum(seg_val == current_mask[start_idx:end_idx])
total_correct_seg += seg_correct
total_correct += correct
total_seen += batch_size
loss_sum += loss_val
classify_loss_sum += classify_loss
seg_loss_sum += seg_loss
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('classify mean loss: %f' % (classify_loss_sum / float(num_batches)))
log_string('seg mean loss: %f' % (seg_loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
log_string('seg accuracy: %f' % (total_correct_seg / (float(total_seen)*sample_num)))
def eval_one_epoch(sess, ops, test_writer):
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
classify_loss_sum = 0
seg_loss_sum = 0
total_correct_seg = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
current_data, current_label, current_mask = data_utils.get_current_data_withmask_h5(TEST_DATA, TEST_LABELS, TEST_MASKS, sample_num)
current_label = np.squeeze(current_label)
current_mask = np.squeeze(current_mask)
num_batches = current_data.shape[0]//batch_size
for batch_idx in range(num_batches):
start_idx = batch_idx * batch_size
end_idx = (batch_idx+1) * batch_size
xforms_np, rotations_np = pf.get_xforms(batch_size,
rotation_range=rotation_range_val,
scaling_range=scaling_range_val,
order=setting.rotation_order)
# Augment batched point clouds by rotation and jittering
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['masks_pl']: current_mask[start_idx:end_idx],
ops['is_training_pl']: is_training,
ops['xforms']: xforms_np,
ops['rotations']: rotations_np,
ops['jitter_range']: np.array([jitter_val])}
summary, step, loss_val, pred_val, seg_val, classify_loss, seg_loss = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred'], ops['seg_pred'], ops['classify_loss'], ops['seg_loss']], feed_dict=feed_dict)
pred_val = np.sum(pred_val, axis=1)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
seg_val = np.argmax(seg_val, 2)
seg_correct = np.sum(seg_val == current_mask[start_idx:end_idx])
total_correct_seg += seg_correct
total_correct += correct
total_seen += batch_size
loss_sum += (loss_val*batch_size)
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
log_string('eval seg accuracy: %f' % (total_correct_seg / (float(total_seen)*sample_num)))
if __name__ == '__main__':
train()
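# Illustrative sketch (added for clarity; not part of the original script): the
# scalar combination used as the training objective in train() above, shown in
# isolation. The example numbers in the trailing comment are made up.
def _combined_loss(classify_loss, seg_loss, seg_weight=SEG_WEIGHT):
    # loss = (1 - w) * classification_loss + w * segmentation_loss
    return (1 - seg_weight) * classify_loss + seg_weight * seg_loss
# With the default weight of 0.5, _combined_loss(0.9, 0.3) evaluates to 0.6.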
|
the-stack_0_26858
|
"""
Pod API
    This document refers to Symphony API calls that do not need encryption or decryption of content. - sessionToken can be obtained by calling the authenticationAPI on the symphony back end and the key manager respectively. Refer to the methods described in authenticatorAPI.yaml. - Actions are defined to be atomic, i.e. will succeed in their entirety or fail and have changed nothing. - If it returns a 40X status then it will have made no change to the system even if some subset of the request would have succeeded. - If this contract cannot be met for any reason then this is an error and the response code will be 50X. # noqa: E501
The version of the OpenAPI document: 20.14.0-SNAPSHOT
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from symphony.bdk.gen.api_client import ApiClient, Endpoint as _Endpoint
from symphony.bdk.gen.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from symphony.bdk.gen.pod_model.error import Error
from symphony.bdk.gen.pod_model.user_v2 import UserV2
class SessionApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.v2_sessioninfo_get_endpoint = _Endpoint(
settings={
'response_type': (UserV2,),
'auth': [],
'endpoint_path': '/v2/sessioninfo',
'operation_id': 'v2_sessioninfo_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
],
'required': [
'session_token',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
},
'attribute_map': {
'session_token': 'sessionToken',
},
'location_map': {
'session_token': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def v2_sessioninfo_get(
self,
session_token,
**kwargs
):
"""Get information about the current user's session. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v2_sessioninfo_get(session_token, async_req=True)
>>> result = thread.get()
Args:
session_token (str): Session authentication token.
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
                code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UserV2
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['session_token'] = \
session_token
return self.v2_sessioninfo_get_endpoint.call_with_http_info(**kwargs)
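# Illustrative usage sketch (added for clarity; not part of the generated code).
# The session token is a placeholder; real applications normally obtain it and
# build clients through the BDK facade rather than instantiating this class.
def _example_sessioninfo(session_token):
    api = SessionApi(ApiClient())  # default ApiClient configuration
    user = api.v2_sessioninfo_get(session_token)  # returns a UserV2 model
    return user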
|
the-stack_0_26859
|
#!/usr/bin/python
from __future__ import print_function
import itertools
import numbers
import os
import subprocess
import sys
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
HERE = os.path.dirname(os.path.realpath(__file__))
TEST_FOLDER = os.path.abspath(os.path.join(HERE, "..", "test"))
sys.path.append(TEST_FOLDER)
from sig_utils import *
WORKING_FOLDER = "./benchmarks/"
BENCHMARK_TEMPLATE = """
static void {benchmark_name}(benchmark::State& state) {{
{setup}
for (auto _ : state) {{
{var_conversions}
auto start = std::chrono::high_resolution_clock::now();
{code}
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed_seconds.count());
stan::math::recover_memory();
benchmark::ClobberMemory();
}}
}}
BENCHMARK({benchmark_name})->RangeMultiplier({multi})->Range(1, {max_size})->UseManualTime();
"""
CUSTOM_MAIN = """
int main(int argc, char** argv)
{{
stan::math::ChainableStack::instance_->memalloc_.alloc({});
stan::math::recover_memory();
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();
}}
"""
overload_scalar = {
"Prim": "double",
"Rev": "stan::math::var",
"Fwd": "stan::math::fvar<double>",
"Mix": "stan::math::fvar<stan::math::var>",
}
def run_command(command):
"""
Runs given command and waits until it finishes executing.
:param command: command to execute
"""
print()
print(" ".join(command))
p1 = subprocess.Popen(command)
if p1.wait() != 0:
raise RuntimeError("command failed: " + " ".join(command))
def build(exe_filepath):
"""
Builds a file using make.
:param exe_filepath: File to build
"""
run_command([make, exe_filepath])
def run_benchmark(exe_filepath, n_repeats=1, csv_out_file=None):
"""
Runs a benchmark
:param exe_filepath: path to the benchmark executable
:param n_repeats: how many times to repeat each benchmark
    :param csv_out_file: path to csv file to store benchmark results into
"""
command = [exe_filepath]
if n_repeats > 1:
command.append("--benchmark_repetitions={}".format(n_repeats))
command.append("--benchmark_report_aggregates_only=true")
if csv_out_file is not None:
command.append("--benchmark_out={}".format(csv_out_file))
command.append("--benchmark_out_format=csv")
run_command(command)
def pick_color(n):
str_bit_reversed_n = "{:015b}".format(n + 1)[::-1]
r = 0.9 * ((int(str_bit_reversed_n[0::3], 2) / 2.0 ** 5 + 0.3) % 1)
g = 0.9 * ((int(str_bit_reversed_n[1::3], 2) / 2.0 ** 5 + 0.3) % 1)
b = 0.9 * ((int(str_bit_reversed_n[2::3], 2) / 2.0 ** 5 + 0.3) % 1)
return r, g, b
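# Small self-check sketch (added for clarity; not part of the original script):
# pick_color is deterministic in n, so re-plotting the same set of signatures
# reuses the same colors, and small neighbouring indices map to distinct RGB
# triples thanks to the bit reversal above.
def _demo_pick_color(count=4):
    colors = [pick_color(n) for n in range(count)]
    assert pick_color(0) == pick_color(0)  # stable per index
    assert len(set(colors)) == len(colors)  # distinct for small indices
    return colors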
def plot_results(csv_filename, out_file="", plot_log_y=False):
"""
Plots benchmark results.
:param csv_filename: path to csv file containing results to plot
:param out_file: path to image file to store figure into. If it equals to "window" opens it in an interactive window.
"""
import pandas
import numpy
import matplotlib
if out_file != "window":
matplotlib.use("Agg")
import matplotlib.pyplot as plt
with open(csv_filename) as f:
# google benchmark writes some non-csv data at beginning
for line in iter(f.readline, ""):
if line.startswith("name,iterations"):
f.seek(f.tell() - len(line) - 1, os.SEEK_SET)
break
data = pandas.read_csv(f)
timing_data = pandas.concat(
[data["name"].str.split("/", expand=True).iloc[:, :2], data["real_time"]],
axis=1,
)
timing_data.columns = ["signatures", "sizes", "times"]
timing_data.loc[:, "sizes"] = timing_data["sizes"].astype(int)
timing_data.loc[:, "times"] /= 1000 # convert to microseconds
fig, ax = plt.subplots(figsize=(10, 10))
fig.set_tight_layout(True)
ax.set_xscale("log")
if plot_log_y:
ax.set_yscale("log")
ax.set_xlabel("size")
ax.set_ylabel("time[us]")
for n, (signature, sub_data) in enumerate(timing_data.groupby("signatures")):
ax.plot(
sub_data["sizes"],
sub_data["times"],
"x",
color=pick_color(n),
label="_nolegend_",
)
avg_sig_times = (
sub_data.groupby(by="sizes")["times"]
.median()
.reset_index()
.sort_values(by="sizes")
)
ax.plot(
avg_sig_times["sizes"],
avg_sig_times["times"],
label=signature,
color=pick_color(n),
)
[
spine.set_visible(False)
for loc, spine in ax.spines.items()
if loc in ["top", "right", "left", "bottom"]
]
ax.minorticks_off()
ax.grid()
ax.legend()
if out_file == "window":
plt.show()
else:
fig.savefig(out_file, bbox_inches="tight", dpi=300)
def plot_compare(csv_filename, reference_csv_filename, out_file="", plot_log_y=False):
"""
Plots benchmark speedup compared to reference results.
:param csv_filename: path to csv file containing results to plot
:param reference_csv_filename: path to csv file containing reference results to plot
:param out_file: path to image file to store figure into. If it equals to "window" opens it in an interactive window.
"""
import pandas, numpy, matplotlib
if out_file != "window":
matplotlib.use("Agg")
import matplotlib.pyplot as plt
with open(csv_filename) as f:
# google benchmark writes some non-csv data at beginning
for line in iter(f.readline, ""):
if line.startswith("name,iterations"):
f.seek(f.tell() - len(line) - 1, os.SEEK_SET)
break
data = pandas.read_csv(f)
with open(reference_csv_filename) as f:
# google benchmark writes some non-csv data at beginning
for line in iter(f.readline, ""):
if line.startswith("name,iterations"):
f.seek(f.tell() - len(line) - 1, os.SEEK_SET)
break
reference_data = pandas.read_csv(f)
timing_data = pandas.concat(
[data["name"].str.split("/", expand=True).iloc[:, :2], data["real_time"]],
axis=1,
)
reference_timing_data = pandas.concat(
[
reference_data["name"].str.split("/", expand=True).iloc[:, :2],
reference_data["real_time"],
],
axis=1,
)
timing_data.columns = reference_timing_data.columns = [
"signatures",
"sizes",
"times",
]
same_in_last_selector = reference_timing_data["signatures"].isin(
timing_data["signatures"]
)
reference_timing_data = reference_timing_data.loc[same_in_last_selector, :]
assert (reference_timing_data["signatures"] == timing_data["signatures"]).all()
assert (reference_timing_data["sizes"] == timing_data["sizes"]).all()
timing_data["speedup"] = reference_timing_data["times"] / timing_data["times"]
timing_data["sizes"] = timing_data["sizes"].astype(int)
fig, ax = plt.subplots(figsize=(10, 10))
fig.set_tight_layout(True)
ax.set_xscale("log")
if plot_log_y:
ax.set_yscale("log")
ax.set_xlabel("size")
ax.set_ylabel("speedup")
for n, (signature, sub_data) in enumerate(timing_data.groupby("signatures")):
ax.plot(
sub_data["sizes"],
sub_data["speedup"],
"x",
color=pick_color(n),
label="_nolegend_",
)
avg_sig_speedups = (
sub_data.groupby(by="sizes")["speedup"]
.median()
.reset_index()
.sort_values(by="sizes")
)
ax.plot(
avg_sig_speedups["sizes"],
avg_sig_speedups["speedup"],
label=signature,
color=pick_color(n),
)
plt.plot([1, max(timing_data["sizes"])], [1, 1], "--", color="gray")
[
spine.set_visible(False)
for loc, spine in ax.spines.items()
if loc in ["top", "right", "left", "bottom"]
]
ax.minorticks_off()
ax.grid()
ax.legend()
if out_file == "window":
plt.show()
else:
fig.savefig(out_file, bbox_inches="tight", dpi=300)
def benchmark(
functions_or_sigs,
cpp_filename="benchmark.cpp",
overloads=("Prim", "Rev"),
multiplier_param=None,
max_size_param=None,
max_dim=3,
n_repeats=1,
skip_similar_signatures=False,
csv_out_file=None,
opencl=False,
varmat=False,
):
"""
Generates benchmark code, compiles it and runs the benchmark.
:param functions_or_sigs: List of function names and/or signatures to benchmark
:param cpp_filename: filename of cpp file to use
:param overloads: Which overloads to benchmark
    :param multiplier_param: Multiplier by which to increase argument size.
:param max_size_param: Maximum argument size.
:param max_dim: Maximum number of argument dimensions to benchmark. Signatures with any argument with
larger number of dimensions are skipped."
:param n_repeats: Number of times to repeat each benchmark.
:param skip_similar_signatures: Whether to skip similar signatures. Two signatures are similar if they
        differ only in similar vector types, which are vector, row_vector and real[].
:param csv_out_file: Filename of the csv file to store benchmark results in.
"""
all_signatures = get_signatures()
functions, signatures = handle_function_list(functions_or_sigs)
functions = set(functions)
signatures = set(signatures)
remaining_functions = set(functions)
parsed_signatures = []
ref_signatures = set()
for signature in all_signatures:
return_type, function_name, stan_args = parse_signature(signature)
reference_args = tuple(reference_vector_argument(i) for i in stan_args)
if (
skip_similar_signatures
and (function_name, reference_args) in ref_signatures
):
continue
if (signature in signatures) or (function_name in functions):
parsed_signatures.append([return_type, function_name, stan_args])
remaining_functions.discard(function_name)
ref_signatures.add((function_name, reference_args))
for signature in signatures:
return_type, function_name, stan_args = parse_signature(signature)
reference_args = tuple(reference_vector_argument(i) for i in stan_args)
if (
skip_similar_signatures
and (function_name, reference_args) in ref_signatures
):
continue
ref_signatures.add((function_name, reference_args))
parsed_signatures.append([return_type, function_name, stan_args])
remaining_functions.discard(function_name)
if remaining_functions:
raise NameError(
"Functions not found: " + ", ".join(sorted(remaining_functions))
)
result = ""
max_args_with_max_dimm = 0
default_max_size = 1024 * 1024 * 16
for return_type, function_name, stan_args in parsed_signatures:
dimm = 0
args_with_max_dimm = 0
for arg in stan_args:
arg_dimm = 0
if "vector" in arg:
arg_dimm = 1
if "matrix" in arg:
arg_dimm = 2
if "[" in arg:
arg_dimm += len(arg.split("[")[1])
if arg_dimm == dimm:
args_with_max_dimm += 1
elif arg_dimm > dimm:
dimm = arg_dimm
args_with_max_dimm = 1
if dimm > max_dim:
continue
max_args_with_max_dimm = max(max_args_with_max_dimm, args_with_max_dimm)
if max_size_param is None:
if dimm == 0: # signature with only scalar arguments
max_size = 1
else:
max_size = default_max_size
max_size = int(max_size ** (1.0 / dimm))
else:
max_size = max_size_param
if multiplier_param is None:
multiplier = 4
if dimm >= 2:
multiplier = 2
else:
multiplier = multiplier_param
cpp_arg_templates = []
overload_opts = []
for n, stan_arg in enumerate(stan_args):
cpp_arg_template = get_cpp_type(stan_arg)
arg_overload_opts = ["Prim"]
if "SCALAR" in cpp_arg_template and not (
function_name in non_differentiable_args
and n in non_differentiable_args[function_name]
):
arg_overload_opts = overloads
cpp_arg_templates.append(cpp_arg_template)
overload_opts.append(arg_overload_opts)
for arg_overloads in itertools.product(*overload_opts):
# generate one benchmark
benchmark_name = function_name
setup = ""
var_conversions = ""
code = " auto res = stan::math::eval(stan::math::{}(".format(
function_name
)
for (
n,
(arg_overload, cpp_arg_template, stan_arg),
) in enumerate(zip(arg_overloads, cpp_arg_templates, stan_args)):
if stan_arg.endswith("]"):
stan_arg2, vec = stan_arg.split("[")
benchmark_name += (
"_" + arg_overload + "_" + stan_arg2 + str(len(vec))
)
else:
benchmark_name += "_" + arg_overload + "_" + stan_arg
scalar = overload_scalar[arg_overload]
arg_type = cpp_arg_template.replace("SCALAR", scalar)
var_name = "arg" + str(n)
make_arg_function = "make_arg"
value = 0.4
if function_name in special_arg_values:
if isinstance(special_arg_values[function_name][n], str):
make_arg_function = special_arg_values[function_name][n]
elif isinstance(
special_arg_values[function_name][n], numbers.Number
):
value = special_arg_values[function_name][n]
if scalar == "double":
setup += (
" {} {} = stan::test::{}<{}>({}, state.range(0));\n".format(
arg_type,
var_name,
make_arg_function,
arg_type,
value,
)
)
if opencl == "base":
setup += " auto {} = stan::math::to_matrix_cl({});\n".format(
var_name + "_cl", var_name
)
var_name += "_cl"
elif varmat == "base" and arg_overload == "Rev":
setup += " auto {} = stan::math::to_var_value({});\n".format(
var_name + "_varmat", var_name
)
var_name += "_varmat"
else:
var_conversions += (
" {} {} = stan::test::{}<{}>({}, state.range(0));\n".format(
arg_type,
var_name,
make_arg_function,
arg_type,
value,
)
)
if opencl == "base":
var_conversions += (
" auto {} = stan::math::to_matrix_cl({});\n".format(
var_name + "_cl", var_name
)
)
var_name += "_cl"
elif varmat == "base" and arg_overload == "Rev":
var_conversions += (
" auto {} = stan::math::to_var_value({});\n".format(
var_name + "_varmat", var_name
)
)
var_name += "_varmat"
if opencl == "copy" and stan_arg not in ("int", "real"):
code += "stan::math::to_matrix_cl({}), ".format(var_name)
elif (
varmat == "copy"
and stan_arg not in ("int", "real")
and arg_overload == "Rev"
):
code += "stan::math::to_var_value({}), ".format(var_name)
else:
code += var_name + ", "
if opencl == "base":
var_conversions += " stan::math::opencl_context.queue().finish();\n"
code = code[:-2] + "));\n"
if "Rev" in arg_overloads:
code += " stan::math::grad();\n"
result += BENCHMARK_TEMPLATE.format(
benchmark_name=benchmark_name,
setup=setup,
var_conversions=var_conversions,
code=code,
multi=multiplier,
max_size=max_size,
)
cpp_filepath = os.path.join(WORKING_FOLDER, cpp_filename)
with open(cpp_filepath, "w") as f:
f.write("#include <benchmark/benchmark.h>\n")
f.write("#include <test/expressions/expression_test_helpers.hpp>\n\n")
f.write(result)
if "Rev" in overloads:
# estimate the amount of arena memory the benchmarks will need
DOUBLE_SIZE = 8
N_ARRAYS = 4 # vals, adjoints, pointers + 1 for anything else
f.write(
CUSTOM_MAIN.format(
(max_size_param or default_max_size)
* DOUBLE_SIZE
* N_ARRAYS
* (max_args_with_max_dimm + 1)
)
)
else:
f.write("BENCHMARK_MAIN();")
exe_filepath = cpp_filepath.replace(".cpp", exe_extension)
build(exe_filepath)
run_benchmark(exe_filepath, n_repeats, csv_out_file)
def main(
functions_or_sigs,
cpp_filename="benchmark.cpp",
overloads=("Prim", "Rev"),
multiplier_param=None,
max_size_param=None,
max_dim=3,
n_repeats=1,
skip_similar_signatures=False,
csv_out_file=None,
opencl=False,
varmat=False,
plot=False,
plot_log_y=False,
plot_speedup=False,
plot_reference=None,
):
"""
Generates benchmark code, compiles it and runs the benchmark. Optionally plots the results.
:param functions_or_sigs: List of function names and/or signatures to benchmark
:param cpp_filename: filename of cpp file to use
:param overloads: Which overloads to benchmark
    :param multiplier_param: Multiplier by which to increase argument size.
:param max_size_param: Maximum argument size.
:param max_dim: Maximum number of argument dimensions to benchmark. Signatures with any argument with
larger number of dimensions are skipped."
:param n_repeats: Number of times to repeat each benchmark.
:param skip_similar_signatures: Whether to skip similar signatures. Two signatures are similar if they
        differ only in similar vector types, which are vector, row_vector and real[].
:param csv_out_file: Filename of the csv file to store benchmark results in.
    :param plot: Filename of the image file to store the plot into. If it equals to "window", opens the plot in an interactive window.
:param plot_log_y: Use logarithmic y axis for plotting
:param plot_speedup: plot speedup of OpenCL or varmat overloads compared to CPU ones
"""
if plot and csv_out_file is None:
csv_out_file = ".benchmark.csv"
if plot_speedup and (opencl or varmat):
if opencl:
special = "_cl"
else:
special = "_varmat"
opencl_csv_out_file = csv_out_file + special
if "." in csv_out_file:
base, ext = csv_out_file.rsplit(".", 1)
opencl_csv_out_file = base + special + "." + ext
benchmark(
functions_or_sigs,
cpp_filename,
overloads,
multiplier_param,
max_size_param,
max_dim,
n_repeats,
skip_similar_signatures,
csv_out_file,
False,
False,
)
benchmark(
functions_or_sigs,
cpp_filename,
overloads,
multiplier_param,
max_size_param,
max_dim,
n_repeats,
skip_similar_signatures,
opencl_csv_out_file,
opencl,
varmat,
)
plot_compare(opencl_csv_out_file, csv_out_file, plot)
else:
benchmark(
functions_or_sigs,
cpp_filename,
overloads,
multiplier_param,
max_size_param,
max_dim,
n_repeats,
skip_similar_signatures,
csv_out_file,
opencl,
varmat,
)
if plot_reference:
plot_compare(csv_out_file, plot_reference, plot, plot_log_y)
elif plot:
plot_results(csv_out_file, plot, plot_log_y)
class FullErrorMsgParser(ArgumentParser):
"""
Modified ArgumentParser that prints full error message on any error.
"""
def error(self, message):
sys.stderr.write("error: %s\n" % message)
self.print_help()
sys.exit(2)
def processCLIArgs():
"""
Define and process the command line interface to the benchmark.py script.
"""
parser = FullErrorMsgParser(
description="Generate and run_command benchmarks.",
formatter_class=ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"functions",
nargs="+",
type=str,
default=[],
help="Signatures and/or function names to benchmark.",
)
parser.add_argument(
"--overloads",
nargs="+",
type=str,
default=["Prim", "Rev"],
help="Which overload combinations to benchmark. Possible values: Prim, Rev, Fwd, Mix. Defaults to Prim and Rev.",
)
parser.add_argument(
"--multiplier",
type=int,
default=None,
help="Multiplyer, by which to increase argument size. Defaults to 4 for functions with "
"1-dimensional arguments and 2 for other functions.",
)
parser.add_argument(
"--max_size",
type=int,
default=None,
help="Maximum argument size. Defaults to (16000000)**(1/dimm), where dimm is the largest "
"number of dimensions of arguments.",
)
parser.add_argument(
"--max_dim",
type=int,
default=3,
help="Maximum number of argument dimensions to benchmark. Signatures with any argument with "
"larger number of dimensions are skipped.",
)
parser.add_argument(
"--cpp",
metavar="filename",
type=str,
default="benchmark.cpp",
help="Filename of the cpp file to generate.",
)
parser.add_argument(
"--repeats",
metavar="N",
type=int,
default=1,
help="Number of times to repeat each benchmark.",
)
parser.add_argument(
"--csv",
metavar="filename",
type=str,
default=None,
help="Filename of the csv file to store benchmark results in. By default does not store results.",
)
parser.add_argument(
"--plot",
metavar="filename",
type=str,
default=False,
help="Filename store plotted graph into. If filename equals to 'window', opens a window with the graph."
" Plotting requires matplotlib and pandas libraries. Default: no plotting.",
)
parser.add_argument(
"--plot_log_y",
default=False,
action="store_true",
help="Use logarithmic y axis when plotting.",
)
parser.add_argument(
"--opencl",
metavar="setting",
type=str,
default=False,
help="Benchmark OpenCL overloads. Possible values: "
"base - benchmark just the execution time, "
"copy - include argument copying time",
)
parser.add_argument(
"--varmat",
metavar="setting",
type=str,
default=False,
help="Benchmark varmat overloads. Possible values: "
"base - benchmark just the execution time, "
"copy - include argument copying time",
)
parser.add_argument(
"--plot_speedup",
default=False,
action="store_true",
help="Plots speedup of OpenCL or varmat overloads compared to Eigen matvar ones. Can only be specified together "
"with both --plot and either --opencl or --varmat. Cannot be specified together with --plot_reference.",
)
parser.add_argument(
"--plot_reference",
metavar="filename",
type=str,
default=None,
help="Specify filename of reference run csv output. Plots speedup of this run compared to the reference. "
"Reference run must have all parameters the same as this one, except possibly --opencl, output files and "
"plotting parameters. Can only be specified together with --plot. Cannot be specified together with "
"--plot_cl_speedup.",
)
parser.add_argument(
"--skip_similar_signatures",
default=False,
action="store_true",
help="Skip similar signatures. Two signatures are similar if they"
"difffer only in similar vector types, which are vector, row_vector and real[].",
)
args = parser.parse_args()
assert not (args.opencl and args.varmat), ValueError(
"--opencl and --varmat cannot be specified at the same time!"
)
if args.plot_reference or args.plot_speedup or args.plot_log_y:
assert args.plot, ValueError(
"--plot is required if you specify any of --plot_reference, --plot_speedup, --plot_log_y!"
)
main(
functions_or_sigs=args.functions,
cpp_filename=args.cpp,
overloads=args.overloads,
multiplier_param=args.multiplier,
max_size_param=args.max_size,
max_dim=args.max_dim,
csv_out_file=args.csv,
n_repeats=args.repeats,
skip_similar_signatures=args.skip_similar_signatures,
plot=args.plot,
plot_log_y=args.plot_log_y,
opencl=args.opencl,
plot_speedup=args.plot_speedup,
plot_reference=args.plot_reference,
varmat=args.varmat,
)
if __name__ == "__main__":
processCLIArgs()
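# Illustrative sketch (added for clarity; not part of the original script): the
# same run can be driven programmatically instead of through the CLI. The
# signature name "add" and the output filenames are examples only.
def _example_programmatic_run():
    main(
        functions_or_sigs=["add"],
        overloads=("Prim", "Rev"),
        n_repeats=3,
        csv_out_file="add_benchmark.csv",
        plot="add_benchmark.png",
    )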
|
the-stack_0_26861
|
import cv2
import numpy as np
bgr = cv2.imread('files/gti.jpeg')
C1 = bgr[:,:,0]  # blue channel
C2 = bgr[:,:,1]  # green channel
C3 = bgr[:,:,2]  # red channel
cv2.imshow('BGR', np.hstack([C1, C2, C3]))
cv2.waitKey(0)
cv2.destroyAllWindows()
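# Alternative sketch (added for clarity; not part of the original snippet):
# cv2.split returns the same three single-channel planes, in B, G, R order.
# It is not called here, so the script's behaviour is unchanged.
def split_channels(image):
    b, g, r = cv2.split(image)
    return b, g, r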
|
the-stack_0_26862
|
#!/usr/bin/python3
# this script will update the versions in packages and innosetup installer files to match that in config.h
import plistlib, os, datetime, fileinput, glob, sys, string
scriptpath = os.path.dirname(os.path.realpath(__file__))
projectpath = os.path.abspath(os.path.join(scriptpath, os.pardir))
IPLUG2_ROOT = "../../.."
sys.path.insert(0, os.path.join(os.getcwd(), IPLUG2_ROOT + '/Scripts'))
from parse_config import parse_config
def replacestrs(filename, s, r):
files = glob.glob(filename)
for line in fileinput.input(files,inplace=1):
line = line.replace(s, r)
sys.stdout.write(line)
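# Illustrative usage sketch (added for clarity; replacestrs is otherwise only
# referenced from commented-out code below). It rewrites the matched files in
# place; the glob pattern and strings here are placeholders.
def _example_replace(pattern="installer/*.iss"):
    replacestrs(pattern, "OldProductName", "NewProductName")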
def main():
demo = 0
if len(sys.argv) != 2:
print("Usage: update_installer_version.py demo(0 or 1)")
sys.exit(1)
else:
demo=int(sys.argv[1])
config = parse_config(projectpath)
# MAC INSTALLER
print("Updating Mac Installer version info...")
plistpath = projectpath + "/installer/" + config['BUNDLE_NAME'] + ".pkgproj"
with open(plistpath, 'rb') as fp:
installer = plistlib.load(fp)
# range = number of items in the installer (VST 2, VST 3, app, audiounit, aax)
for x in range(0,5):
installer['PACKAGES'][x]['PACKAGE_SETTINGS']['VERSION'] = config['FULL_VER_STR']
if demo:
installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME'] + " Demo"
installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro-demo.rtf"
else:
installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME']
installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro.rtf"
with open(plistpath, 'wb') as fp:
plistlib.dump(installer, fp)
# replacestrs(plistpath, "//Apple//", "//Apple Computer//")
# WIN INSTALLER
print("Updating Windows Installer version info...")
for line in fileinput.input(projectpath + "/installer/" + config['BUNDLE_NAME'] + ".iss",inplace=1):
if "AppVersion" in line:
line="AppVersion=" + config['FULL_VER_STR'] + "\n"
if "OutputBaseFilename" in line:
if demo:
line="OutputBaseFilename=IPlugConvoEngine Demo Installer\n"
else:
line="OutputBaseFilename=IPlugConvoEngine Installer\n"
if 'Source: "readme' in line:
if demo:
line='Source: "readme-win-demo.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
else:
line='Source: "readme-win.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
if "WelcomeLabel1" in line:
if demo:
line="WelcomeLabel1=Welcome to the IPlugConvoEngine Demo installer\n"
else:
line="WelcomeLabel1=Welcome to the IPlugConvoEngine installer\n"
if "SetupWindowTitle" in line:
if demo:
line="SetupWindowTitle=IPlugConvoEngine Demo installer\n"
else:
line="SetupWindowTitle=IPlugConvoEngine installer\n"
sys.stdout.write(line)
if __name__ == '__main__':
main()
|
the-stack_0_26863
|
import asyncio
import copy
import csv
import math
import queue
import threading
import uuid
import warnings
from datetime import datetime
from typing import List, Optional, Union, Callable, Coroutine, Iterable
import pyarrow.parquet as pq
import pandas
import pytz
from .dtypes import _termination_obj, Event, legal_time_units
from .flow import Flow, Complete
from .utils import url_to_file_system, drop_reserved_columns, find_filters
class AwaitableResult:
"""Future result of a computation. Calling await_result() will return with the result once the computation is completed."""
def __init__(self, on_error: Optional[Callable[[], None]] = None):
self._on_error = on_error
self._q = queue.Queue(1)
self._completed = False
def await_result(self):
"""Returns the result, once the computation is completed"""
result = self._q.get()
if isinstance(result, BaseException):
if self._on_error:
self._on_error()
raise result
return result
def _set_result(self, element):
if not self._completed:
self._completed = True
self._q.put(element)
def _set_error(self, ex):
self._set_result(ex)
def _convert_to_datetime(obj, time_format: Optional[str] = None):
if isinstance(obj, datetime):
return obj
elif isinstance(obj, float) or isinstance(obj, int):
return datetime.fromtimestamp(obj, tz=pytz.utc)
elif isinstance(obj, str):
if time_format is None:
return datetime.fromisoformat(obj)
else:
return datetime.strptime(obj, time_format)
else:
raise ValueError(f"Could not parse '{obj}' (of type {type(obj)}) as a time.")
class FlowControllerBase:
def __init__(self, key_field: Optional[Union[str, List[str]]], time_field: Optional[str], time_format: Optional[str]):
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._current_uuid_base = None
self._current_uuid_count = 0
def _get_uuid(self):
if not self._current_uuid_base or self._current_uuid_count == 1024:
self._current_uuid_base = uuid.uuid4().hex
self._current_uuid_count = 0
result = f'{self._current_uuid_base}-{self._current_uuid_count:04}'
self._current_uuid_count += 1
return result
def _build_event(self, element, key, event_time):
body = element
element_is_event = hasattr(element, 'id')
if element_is_event:
body = element.body
if not key and self._key_field:
if isinstance(self._key_field, str):
key = body[self._key_field]
else:
key = []
for field in self._key_field:
key.append(body[field])
if not event_time and self._time_field:
event_time = _convert_to_datetime(body[self._time_field], self._time_format)
body[self._time_field] = event_time
if element_is_event:
if key:
element.key = key
if event_time:
element.time = event_time
return element
else:
return Event(body, id=self._get_uuid(), key=key, time=event_time)
class FlowController(FlowControllerBase):
"""Used to emit events into the associated flow, terminate the flow, and await the flow's termination.
To be used from a synchronous context.
"""
def __init__(self, emit_fn, await_termination_fn, return_awaitable_result, key_field: Optional[str] = None,
time_field: Optional[str] = None, time_format: Optional[str] = None):
super().__init__(key_field, time_field, time_format)
self._emit_fn = emit_fn
self._await_termination_fn = await_termination_fn
self._return_awaitable_result = return_awaitable_result
def emit(self, element: object, key: Optional[Union[str, List[str]]] = None, event_time: Optional[datetime] = None,
return_awaitable_result: Optional[bool] = None):
"""Emits an event into the associated flow.
:param element: The event data, or payload. To set metadata as well, pass an Event object.
:param key: The event key(s) (optional) #add to async
:param event_time: The event time (default to current time, UTC).
:param return_awaitable_result: Deprecated! An awaitable result object will be returned if a Complete step appears in the flow.
        :returns: AwaitableResult if a Complete step appears in the flow. None otherwise.
"""
if return_awaitable_result is not None:
warnings.warn('return_awaitable_result is deprecated. An awaitable result object will be returned if a Complete step appears '
'in the flow.',
DeprecationWarning)
event = self._build_event(element, key, event_time)
awaitable_result = None
if self._return_awaitable_result:
awaitable_result = AwaitableResult(self.terminate)
event._awaitable_result = awaitable_result
self._emit_fn(event)
return awaitable_result
def terminate(self):
"""Terminates the associated flow."""
self._emit_fn(_termination_obj)
def await_termination(self):
"""Awaits the termination of the flow. To be called after terminate. Returns the termination result of the flow (if any)."""
return self._await_termination_fn()
class FlowAwaiter:
"""Future termination result of a flow. Calling await_termination() will wait for the flow to terminate and return its
termination result."""
def __init__(self, await_termination_fn):
self._await_termination_fn = await_termination_fn
def await_termination(self):
""""waits for the flow to terminate and returns the result"""
return self._await_termination_fn()
class SyncEmitSource(Flow):
"""Synchronous entry point into a flow. Produces a FlowController when run, for use from inside a synchronous context. See AsyncEmitSource
for use from inside an async context.
:param buffer_size: size of the incoming event buffer. Defaults to 1024.
:param key_field: Field to extract and use as the key. Optional.
:param time_field: Field to extract and use as the time. Optional.
:param time_format: Format of the event time. Needed when a nonstandard string timestamp is used (i.e. not ISO or epoch). Optional.
:param name: Name of this step, as it should appear in logs. Defaults to class name (SyncEmitSource).
:type name: string
for additional params, see documentation of :class:`storey.flow.Flow`
"""
_legal_first_step = True
def __init__(self, buffer_size: Optional[int] = None, key_field: Union[list, str, None] = None, time_field: Optional[str] = None,
time_format: Optional[str] = None, **kwargs):
if buffer_size is None:
buffer_size = 1024
else:
kwargs['buffer_size'] = buffer_size
if key_field is not None:
kwargs['key_field'] = key_field
super().__init__(**kwargs)
if buffer_size <= 0:
raise ValueError('Buffer size must be positive')
self._q = queue.Queue(buffer_size)
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._termination_q = queue.Queue(1)
self._ex = None
self._closeables = []
async def _run_loop(self):
loop = asyncio.get_running_loop()
self._termination_future = asyncio.get_running_loop().create_future()
while True:
event = await loop.run_in_executor(None, self._q.get)
try:
termination_result = await self._do_downstream(event)
if event is _termination_obj:
self._termination_future.set_result(termination_result)
except BaseException as ex:
if event is not _termination_obj and event._awaitable_result:
event._awaitable_result._set_error(ex)
self._ex = ex
if not self._q.empty():
event = self._q.get()
if event is not _termination_obj and event._awaitable_result:
event._awaitable_result._set_error(ex)
self._termination_future.set_result(None)
break
if event is _termination_obj:
break
for closeable in self._closeables:
await closeable.close()
def _loop_thread_main(self):
asyncio.run(self._run_loop())
self._termination_q.put(self._ex)
def _raise_on_error(self, ex):
if ex:
if self.verbose:
raise type(self._ex)('Flow execution terminated') from self._ex
raise self._ex
def _emit(self, event):
if event is not _termination_obj:
self._raise_on_error(self._ex)
self._q.put(event)
if event is not _termination_obj:
self._raise_on_error(self._ex)
def run(self):
"""Starts the flow"""
self._closeables = super().run()
thread = threading.Thread(target=self._loop_thread_main)
thread.start()
def raise_error_or_return_termination_result():
self._raise_on_error(self._termination_q.get())
return self._termination_future.result()
has_complete = self._check_step_in_flow(Complete)
return FlowController(self._emit, raise_error_or_return_termination_result, has_complete, self._key_field, self._time_field,
self._time_format)
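# Illustrative sketch (added for clarity; not part of the original module): how a
# synchronous caller typically drives a flow built around SyncEmitSource. It
# assumes the usual storey composition helpers (build_flow, Map) are importable
# from the package top level, as in upstream storey.
def _example_sync_flow():
    from storey import build_flow, Map
    controller = build_flow([
        SyncEmitSource(),
        Map(lambda x: x * 2),
        Complete(),  # makes emit() hand back an AwaitableResult
    ]).run()
    result = controller.emit(21).await_result()  # expected to yield 42
    controller.terminate()
    controller.await_termination()
    return result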
class AsyncAwaitableResult:
"""Future result of a computation. Calling await_result() will return with the result once the computation is completed.
Same as AwaitableResult but for an async context."""
def __init__(self, on_error: Optional[Callable[[BaseException], Coroutine]] = None):
self._on_error = on_error
self._q = asyncio.Queue(1)
self._completed = False
async def await_result(self):
"""returns the result of the computation, once the computation is complete"""
result = await self._q.get()
if isinstance(result, BaseException):
if self._on_error:
await self._on_error()
raise result
return result
async def _set_result(self, element):
if not self._completed:
self._completed = True
await self._q.put(element)
async def _set_error(self, ex):
await self._set_result(ex)
class AsyncFlowController(FlowControllerBase):
"""
Used to emit events into the associated flow, terminate the flow, and await the flow's termination. To be used from inside an async def.
"""
def __init__(self, emit_fn, loop_task, await_result, key_field: Optional[str] = None, time_field: Optional[str] = None,
time_format: Optional[str] = None):
super().__init__(key_field, time_field, time_format)
self._emit_fn = emit_fn
self._loop_task = loop_task
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._await_result = await_result
async def emit(self, element: object, key: Optional[Union[str, List[str]]] = None, event_time: Optional[datetime] = None,
await_result: Optional[bool] = None) -> object:
"""Emits an event into the associated flow.
:param element: The event data, or payload. To set metadata as well, pass an Event object.
:param key: The event key(s) (optional)
:param event_time: The event time (default to current time, UTC).
:param await_result: Deprecated. Will await a result if a Complete step appears in the flow.
:returns: The result received from the flow if a Complete step appears in the flow. None otherwise.
"""
if await_result is not None:
warnings.warn('await_result is deprecated. An awaitable result object will be returned if a Complete step appears '
'in the flow.',
DeprecationWarning)
event = self._build_event(element, key, event_time)
awaitable = None
if self._await_result:
awaitable = AsyncAwaitableResult(self.terminate)
event._awaitable_result = awaitable
await self._emit_fn(event)
if self._await_result:
result = await awaitable.await_result()
if isinstance(result, BaseException):
raise result
return result
async def terminate(self):
"""Terminates the associated flow."""
await self._emit_fn(_termination_obj)
async def await_termination(self):
"""Awaits the termination of the flow. To be called after terminate. Returns the termination result of the flow (if any)."""
return await self._loop_task
class AsyncEmitSource(Flow):
"""
Asynchronous entry point into a flow. Produces an AsyncFlowController when run, for use from inside an async def.
See SyncEmitSource for use from inside a synchronous context.
    :param buffer_size: size of the incoming event buffer. Defaults to 1024.
    :param key_field: Field to extract and use as the key. Optional.
:param name: Name of this step, as it should appear in logs. Defaults to class name (AsyncEmitSource).
:type name: string
:param time_field: Field to extract and use as the time. Optional.
:param time_format: Format of the event time. Needed when a nonstandard string timestamp is used (i.e. not ISO or epoch). Optional.
for additional params, see documentation of :class:`~storey.flow.Flow`
"""
_legal_first_step = True
def __init__(self, buffer_size: int = 1024, key_field: Union[list, str, None] = None, time_field: Optional[str] = None,
time_format: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
if buffer_size <= 0:
raise ValueError('Buffer size must be positive')
self._q = asyncio.Queue(buffer_size)
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._ex = None
self._closeables = []
async def _run_loop(self):
while True:
event = await self._q.get()
try:
termination_result = await self._do_downstream(event)
if event is _termination_obj:
return termination_result
except BaseException as ex:
self._ex = ex
if event is not _termination_obj and event._awaitable_result:
awaitable = event._awaitable_result._set_error(ex)
if awaitable:
await awaitable
if not self._q.empty():
await self._q.get()
self._raise_on_error()
finally:
if event is _termination_obj or self._ex:
for closeable in self._closeables:
await closeable.close()
def _raise_on_error(self):
if self._ex:
if self.verbose:
raise type(self._ex)('Flow execution terminated') from self._ex
raise self._ex
async def _emit(self, event):
if event is not _termination_obj:
self._raise_on_error()
await self._q.put(event)
if event is not _termination_obj:
self._raise_on_error()
def run(self):
"""Starts the flow"""
self._closeables = super().run()
loop_task = asyncio.get_running_loop().create_task(self._run_loop())
has_complete = self._check_step_in_flow(Complete)
return AsyncFlowController(self._emit, loop_task, has_complete, self._key_field, self._time_field)
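# Illustrative usage sketch, not part of the original source: drives an AsyncEmitSource
# from inside an async context. build_flow is assumed to be this package's public
# flow-construction helper, and Complete is the completion step referenced above.
async def _example_async_emit_source():
    from storey import build_flow  # deferred import of the package-level helper
    controller = build_flow([
        AsyncEmitSource(buffer_size=8),
        Complete(),
    ]).run()
    result = await controller.emit(5)   # Complete() makes emit() await and return the result
    await controller.terminate()
    await controller.await_termination()
    return result                       # 5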
class _IterableSource(Flow):
_legal_first_step = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._termination_q = queue.Queue(1)
self._ex = None
self._closeables = []
def _init(self):
pass
async def _run_loop(self):
raise NotImplementedError()
async def _async_loop_thread_main(self):
try:
self._termination_future = asyncio.get_running_loop().create_future()
termination_result = await self._run_loop()
self._termination_future.set_result(termination_result)
except BaseException as ex:
self._ex = ex
self._termination_future.set_result(None)
finally:
for closeable in self._closeables:
await closeable.close()
def _loop_thread_main(self):
asyncio.run(self._async_loop_thread_main())
self._termination_q.put(self._ex)
def _raise_on_error(self, ex):
if ex:
if self.verbose:
raise type(self._ex)('Flow execution terminated') from self._ex
raise self._ex
def run(self):
self._closeables = super().run()
self._init()
thread = threading.Thread(target=self._loop_thread_main)
thread.start()
def raise_error_or_return_termination_result():
self._raise_on_error(self._termination_q.get())
return self._termination_future.result()
return FlowAwaiter(raise_error_or_return_termination_result)
async def run_async(self):
self._closeables = super().run()
return await self._run_loop()
class CSVSource(_IterableSource):
"""
Reads CSV files as input source for a flow.
:parameter paths: paths to CSV files
:parameter header: whether CSV files have a header or not. Defaults to False.
    :parameter build_dict: whether to format each record produced from the input file as a dictionary (as opposed to a list).
        Defaults to False.
    :parameter key_field: the CSV field to be used as the key for events. May be an int (field index), or a string (field name) if
        header is True. Can also be a list of such keys. Defaults to None (no key).
    :parameter time_field: the CSV field to be parsed as the timestamp for events. May be an int (field index), or a string (field
        name) if header is True. Defaults to None (no timestamp field).
    :parameter timestamp_format: timestamp format as defined in datetime.strptime(). Defaults to ISO-8601 as parsed by
        datetime.fromisoformat().
:parameter type_inference: Whether to infer data types from the data (when True), or read all fields in as strings (when False).
Defaults to True.
for additional params, see documentation of :class:`~storey.flow.Flow`
"""
def __init__(self, paths: Union[List[str], str], header: bool = False, build_dict: bool = False,
key_field: Union[int, str, List[int], List[str], None] = None, time_field: Union[int, str, None] = None,
timestamp_format: Optional[str] = None, type_inference: bool = True, **kwargs):
kwargs['paths'] = paths
kwargs['header'] = header
kwargs['build_dict'] = build_dict
if key_field is not None:
kwargs['key_field'] = key_field
if time_field is not None:
kwargs['time_field'] = time_field
if timestamp_format is not None:
kwargs['timestamp_format'] = timestamp_format
kwargs['type_inference'] = type_inference
super().__init__(**kwargs)
if isinstance(paths, str):
paths = [paths]
self._paths = paths
self._with_header = header
self._build_dict = build_dict
self._key_field = key_field
self._time_field = time_field
self._timestamp_format = timestamp_format
self._type_inference = type_inference
self._storage_options = kwargs.get('storage_options')
        if not header and isinstance(key_field, str):
            raise ValueError('key_field can only be set to an integer when header is False')
        if not header and isinstance(time_field, str):
            raise ValueError('time_field can only be set to an integer when header is False')
def _init(self):
self._event_buffer = queue.Queue(1024)
self._types = []
self._none_columns = set()
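    # Type-inference codes returned by _infer_type: 'b' bool, 't' timestamp, 'i' int,
    # 'f' float, 's' string, 'n' only empty values seen so far (type still unknown).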
def _infer_type(self, value):
lowercase = value.lower()
if lowercase == 'true' or lowercase == 'false':
return 'b'
try:
self._datetime_from_timestamp(value)
return 't'
except ValueError:
pass
try:
int(value)
return 'i'
except ValueError:
pass
try:
float(value)
return 'f'
except ValueError:
pass
if value == '':
return 'n'
return 's'
def _parse_field(self, field, index):
typ = self._types[index]
if typ == 's':
return field
if typ == 'f':
return float(field) if field != '' else math.nan
if typ == 'i':
return int(field) if field != '' else math.nan
if typ == 'b':
lowercase = field.lower()
if lowercase == 'true':
return True
if lowercase == 'false':
return False
if lowercase == '':
return None
raise TypeError(f'Expected boolean, got {field}')
if typ == 't':
if field == '':
return None
return self._datetime_from_timestamp(field)
if typ == 'n':
return None
raise TypeError(f'Unknown type: {typ}')
def _datetime_from_timestamp(self, timestamp):
if self._timestamp_format:
return pandas.to_datetime(timestamp, format=self._timestamp_format).floor('u').to_pydatetime()
else:
return datetime.fromisoformat(timestamp)
def _blocking_io_loop(self):
try:
for path in self._paths:
fs, file_path = url_to_file_system(path, self._storage_options)
with fs.open(file_path, mode='r') as f:
header = None
field_name_to_index = None
if self._with_header:
line = f.readline()
header = next(csv.reader([line]))
field_name_to_index = {}
for i in range(len(header)):
field_name_to_index[header[i]] = i
for line in f:
create_event = True
parsed_line = next(csv.reader([line]))
if self._type_inference:
if not self._types:
for index, field in enumerate(parsed_line):
type_field = self._infer_type(field)
self._types.append(type_field)
if type_field == 'n':
self._none_columns.add(index)
else:
for index in copy.copy(self._none_columns):
type_field = self._infer_type(parsed_line[index])
if type_field != 'n':
self._types[index] = type_field
self._none_columns.remove(index)
for i in range(len(parsed_line)):
parsed_line[i] = self._parse_field(parsed_line[i], i)
element = parsed_line
key = None
if header:
if len(parsed_line) != len(header):
raise ValueError(
f'CSV line with {len(parsed_line)} fields did not match header with {len(header)} fields')
if self._build_dict:
element = {}
for i in range(len(parsed_line)):
element[header[i]] = parsed_line[i]
if self._key_field:
if isinstance(self._key_field, list):
key = []
for single_key_field in self._key_field:
if self._with_header and isinstance(single_key_field, str):
single_key_field = field_name_to_index[single_key_field]
if parsed_line[single_key_field] is None:
create_event = False
break
key.append(parsed_line[single_key_field])
else:
key_field = self._key_field
if self._with_header and isinstance(key_field, str):
key_field = field_name_to_index[key_field]
key = parsed_line[key_field]
if key is None:
create_event = False
if create_event:
if self._time_field:
time_field = self._time_field
if self._with_header and isinstance(time_field, str):
time_field = field_name_to_index[time_field]
time_as_datetime = parsed_line[time_field]
else:
time_as_datetime = datetime.now()
event = Event(element, key=key, time=time_as_datetime)
self._event_buffer.put(event)
else:
if self.context:
self.context.logger.error(
f"For {parsed_line} value of key {key_field} is None"
)
except BaseException as ex:
self._event_buffer.put(ex)
self._event_buffer.put(_termination_obj)
def _get_event(self):
event = self._event_buffer.get()
if isinstance(event, BaseException):
raise event
return event
async def _run_loop(self):
asyncio.get_running_loop().run_in_executor(None, self._blocking_io_loop)
def get_multiple():
events = [self._get_event()]
while not self._event_buffer.empty() and len(events) < 128:
events.append(self._get_event())
return events
while True:
events = await asyncio.get_running_loop().run_in_executor(None, get_multiple)
for event in events:
res = await self._do_downstream(event)
if event is _termination_obj:
return res
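# Illustrative usage sketch, not part of the original source: reads a CSV file with a
# header row and collects each row as a dict. build_flow and Reduce are assumed to be
# this package's public exports; the file and column names ('data.csv', 'id',
# 'timestamp') are hypothetical.
def _example_csv_source():
    from storey import Reduce, build_flow  # deferred import of package-level helpers
    controller = build_flow([
        CSVSource('data.csv', header=True, build_dict=True, key_field='id', time_field='timestamp'),
        Reduce([], lambda acc, row: acc + [row]),
    ]).run()
    return controller.await_termination()  # list of row dicts, in file order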
class DataframeSource(_IterableSource):
"""Use pandas dataframe as input source for a flow.
:param dfs: A pandas dataframe, or dataframes, to be used as input source for the flow.
    :param key_field: column to be used as key for events. Can be a list of columns.
:param time_field: column to be used as time for events.
:param id_field: column to be used as ID for events.
for additional params, see documentation of :class:`~storey.flow.Flow`
"""
def __init__(self, dfs: Union[pandas.DataFrame, Iterable[pandas.DataFrame]], key_field: Optional[Union[str, List[str]]] = None,
time_field: Optional[str] = None, id_field: Optional[str] = None, **kwargs):
if key_field is not None:
kwargs['key_field'] = key_field
if time_field is not None:
kwargs['time_field'] = time_field
if id_field is not None:
kwargs['id_field'] = id_field
super().__init__(**kwargs)
if isinstance(dfs, pandas.DataFrame):
dfs = [dfs]
self._dfs = dfs
self._key_field = key_field
self._time_field = time_field
self._id_field = id_field
async def _run_loop(self):
for df in self._dfs:
for namedtuple in df.itertuples():
create_event = True
body = namedtuple._asdict()
index = body.pop('Index')
if len(df.index.names) > 1:
for i, index_column in enumerate(df.index.names):
body[index_column] = index[i]
elif df.index.names[0] is not None:
body[df.index.names[0]] = index
key = None
if self._key_field:
if isinstance(self._key_field, list):
key = []
for key_field in self._key_field:
if body[key_field] is None:
create_event = False
break
key.append(body[key_field])
else:
key = body[self._key_field]
if key is None:
create_event = False
if create_event:
time = None
if self._time_field:
time = body[self._time_field]
id = None
if self._id_field:
id = body[self._id_field]
event = Event(body, key=key, time=time, id=id)
await self._do_downstream(event)
else:
if self.context:
self.context.logger.error(
f"For {body} value of key {key_field} is None"
)
return await self._do_downstream(_termination_obj)
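# Illustrative usage sketch, not part of the original source: replays a small dataframe
# as events and sums one of its columns. build_flow and Reduce are assumed to be this
# package's public exports, and the column names are hypothetical.
def _example_dataframe_source():
    from storey import Reduce, build_flow  # deferred import of package-level helpers
    df = pandas.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})
    controller = build_flow([
        DataframeSource(df, key_field='name'),
        Reduce(0, lambda acc, row: acc + row['value']),
    ]).run()
    return controller.await_termination()  # 3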
class ParquetSource(DataframeSource):
"""Reads Parquet files as input source for a flow.
:parameter paths: paths to Parquet files
    :parameter columns: list, default=None. If not None, only these columns will be read from the file.
    :parameter start_filter: datetime. If not None, the results will be filtered by partitions and by 'filter_column' >= start_filter.
        Default is None.
    :parameter end_filter: datetime. If not None, the results will be filtered by partitions and by 'filter_column' < end_filter.
        Default is None.
    :parameter filter_column: Optional. The column to filter by when start_filter and/or end_filter is given; required in that case.
"""
def __init__(self, paths: Union[str, Iterable[str]], columns=None, start_filter: Optional[datetime] = None,
end_filter: Optional[datetime] = None, filter_column: Optional[str] = None, **kwargs):
if end_filter or start_filter:
start_filter = datetime.min if start_filter is None else start_filter
end_filter = datetime.max if end_filter is None else end_filter
if filter_column is None:
raise TypeError('Filter column is required when passing start/end filters')
self._paths = paths
if isinstance(paths, str):
self._paths = [paths]
self._columns = columns
self._start_filter = start_filter
self._end_filter = end_filter
self._filter_column = filter_column
self._storage_options = kwargs.get('storage_options')
super().__init__([], **kwargs)
def _read_filtered_parquet(self, path):
fs, file_path = url_to_file_system(path, self._storage_options)
dataset = pq.ParquetDataset(path, filesystem=fs)
if dataset.partitions:
partitions = dataset.partitions.partition_names
partitions_time_attributes = [j for j in legal_time_units if j in partitions]
else:
partitions_time_attributes = []
filters = []
find_filters(partitions_time_attributes, self._start_filter, self._end_filter, filters, self._filter_column)
return pandas.read_parquet(path, columns=self._columns, filters=filters,
storage_options=self._storage_options)
def _init(self):
self._dfs = []
for path in self._paths:
if self._start_filter or self._end_filter:
df = self._read_filtered_parquet(path)
else:
df = pandas.read_parquet(path, columns=self._columns, storage_options=self._storage_options)
drop_reserved_columns(df)
self._dfs.append(df)
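# Illustrative usage sketch, not part of the original source: reads only rows whose
# 'time' column falls inside the given window. The file path, column names and the
# build_flow/Reduce imports are assumptions.
def _example_parquet_source():
    from storey import Reduce, build_flow  # deferred import of package-level helpers
    controller = build_flow([
        ParquetSource('data.parquet', columns=['time', 'value'],
                      start_filter=datetime(2021, 1, 1), end_filter=datetime(2021, 2, 1),
                      filter_column='time'),
        Reduce([], lambda acc, row: acc + [row]),
    ]).run()
    return controller.await_termination()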
|
the-stack_0_26865
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hmac
import json
import random
import os
import secrets
import time
from hashlib import sha1
from urllib.request import Request, urlopen
def make_changes(num_changes):
changes = []
# One week ago
max_time = time.time() - 604800
head_commit = None
for x in range(num_changes):
change_id = secrets.token_hex(20)
unix_timestamp = time.time() - random.randrange(0, 604800)
change = {
"id": change_id,
"timestamp": datetime.datetime.fromtimestamp(unix_timestamp),
}
if unix_timestamp > max_time:
max_time = unix_timestamp
head_commit = change
changes.append(change)
event = {"head_commit": head_commit, "commits": changes}
return event
def make_issue(root_cause):
event = {
"issue": {
"created_at": root_cause["timestamp"],
"updated_at": datetime.datetime.now(),
"closed_at": datetime.datetime.now(),
"number": random.randrange(0, 1000),
"labels": [{"name": "Incident"}],
"body": "root cause: %s" % root_cause["id"],
}
}
return event
def send_mock_github_events(event_type, data):
webhook_url = os.environ.get("WEBHOOK")
data = json.dumps(data, default=str).encode()
secret = os.environ.get("SECRET").encode()
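    # Sign the raw request body the way GitHub does: HMAC-SHA1 keyed with the shared
    # secret, sent as "sha1=<hexdigest>" in the X-Hub-Signature header below.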
signature = hmac.new(secret, data, sha1)
request = Request(webhook_url, data)
request.add_header("X-Github-Event", event_type)
request.add_header("X-Hub-Signature", "sha1=" + signature.hexdigest())
request.add_header("User-Agent", "GitHub-Hookshot/mock")
request.add_header("Content-Type", "application/json")
request.add_header("Mock", True)
token = os.environ.get("TOKEN")
if token:
request.add_header("Authorization", f"Bearer {token}")
response = urlopen(request)
if response.getcode() == 204:
return 1
else:
return 0
def create_deploy_event(change):
deployment = {
"deployment_status": {
"updated_at": change["timestamp"],
"id": secrets.token_hex(20),
"state": "success",
},
"deployment": {
"sha": change["id"],
}
}
return deployment
def generate_data():
num_success = 0
changes = make_changes(2)
# Send individual changes data
for c in changes["commits"]:
curr_change = {"head_commit": c, "commits": [c]}
num_success += send_mock_github_events("push", curr_change)
# Send fully associated push event
num_success += send_mock_github_events("push", changes)
# Make and send a deployment
deploy = create_deploy_event(changes["head_commit"])
num_success += send_mock_github_events("deployment_status", deploy)
# 15% of deployments create incidents
x = random.randrange(0, 100)
if x < 15:
issue = make_issue(changes["head_commit"])
num_success += send_mock_github_events("issues", issue)
return num_success
num_success = 0
for x in range(10):
num_success += generate_data()
print(f"{num_success} changes successfully sent to event-handler")
|
the-stack_0_26868
|
#!/usr/bin/env python3
import os
import sys
import signal
import pygame
import time
import math
from displayhatmini import DisplayHATMini
print("""Display HAT Mini: Basic Pygame Demo""")
if pygame.vernum < (2, 0, 0):
print("Need PyGame >= 2.0.0:\n python3 -m pip install pygame --upgrade")
sys.exit(1)
def _exit(sig, frame):
global running
running = False
print("\nExiting!...\n")
def update_display():
display_hat.st7789.set_window()
# Grab the pygame screen as a bytes object
pixelbytes = pygame.transform.rotate(screen, 180).convert(16, 0).get_buffer()
# Lazy (slow) byteswap:
pixelbytes = bytearray(pixelbytes)
pixelbytes[0::2], pixelbytes[1::2] = pixelbytes[1::2], pixelbytes[0::2]
# Bypass the ST7789 PIL image RGB888->RGB565 conversion
for i in range(0, len(pixelbytes), 4096):
display_hat.st7789.data(pixelbytes[i:i + 4096])
display_hat = DisplayHATMini(None)
os.putenv('SDL_VIDEODRIVER', 'dummy')
pygame.display.init() # Need to init for .convert() to work
screen = pygame.Surface((display_hat.WIDTH, display_hat.HEIGHT))
signal.signal(signal.SIGINT, _exit)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
break
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
running = False
break
# Clear the screen
screen.fill((0, 0, 0))
box_w = display_hat.WIDTH // 3
box_h = display_hat.HEIGHT // 2
pygame.draw.rect(screen, (255, 0, 0), (0, 0, box_w, box_h))
pygame.draw.rect(screen, (0, 255, 0), (box_w, 0, box_w, box_h))
pygame.draw.rect(screen, (0, 0, 255), (box_w * 2, 0, box_w, box_h))
pygame.draw.rect(screen, (255, 255, 0), (0, box_h, box_w, box_h))
pygame.draw.rect(screen, (255, 0, 255), (box_w, box_h, box_w, box_h))
pygame.draw.rect(screen, (0, 255, 255), (box_w * 2, box_h, box_w, box_h))
r = 50
x = math.sin(time.time() * 2) * (display_hat.WIDTH - r) / 2
y = math.cos(time.time()) * (display_hat.HEIGHT - r) / 2
x += display_hat.WIDTH // 2
y += display_hat.HEIGHT // 2
pygame.draw.circle(screen, (0, 0, 0), (int(x), int(y)), r)
update_display()
pygame.quit()
sys.exit(0)
|
the-stack_0_26869
|
# Check whether the original sequence org can be uniquely reconstructed from the sequences in seqs.
# The org sequence is a permutation of the integers from 1 to n, with 1 ≤ n ≤ 10^4.
# Reconstruction means building a shortest common supersequence of the sequences in seqs
# (i.e., a shortest sequence so that all sequences in seqs are subsequences of it).
# Determine whether there is only one sequence that can be reconstructed from seqs and it is the org sequence.
# Example 1:
# Input:
# org: [1,2,3], seqs: [[1,2],[1,3]]
# Output:
# false
# Explanation:
# [1,2,3] is not the only one sequence that can be reconstructed, because [1,3,2] is also a valid sequence that can be reconstructed.
# Example 2:
# Input:
# org: [1,2,3], seqs: [[1,2]]
# Output:
# false
# Explanation:
# The reconstructed sequence can only be [1,2], so [1,2,3] cannot be reconstructed.
# Example 3:
# Input:
# org: [1,2,3], seqs: [[1,2],[1,3],[2,3]]
# Output:
# true
# Explanation:
# The sequences [1,2], [1,3], and [2,3] can uniquely reconstruct the original sequence [1,2,3].
# Example 4:
# Input:
# org: [4,1,5,2,6,3], seqs: [[5,2,6,3],[4,1,5,2]]
# Output:
# true
# UPDATE (2017/1/8):
# The seqs parameter had been changed to a list of list of strings (instead of a 2d array of strings).
# Please reload the code definition to get the latest changes.
class Solution(object):
def sequenceReconstruction(self, org, seqs):
"""
:type org: List[int]
:type seqs: List[List[int]]
:rtype: bool
"""
        # https://blog.csdn.net/liuchenjane/article/details/52983666
        # First check that every seq in seqs is a subsequence of org: record each element's
        # index in org; if a later element of a seq has a smaller index than an earlier one,
        # that seq is not a subsequence.
        # Then decide whether org can be uniquely reconstructed from these subsequences:
        # for every pair of adjacent numbers x and y in org, some seq must contain x and y
        # adjacently to fix their relative order. Otherwise x and y could be swapped,
        # contradicting the requirement of a unique reconstruction.
if not seqs or not org:
return False
idx = {}
for i, c in enumerate(org):
idx[c] = i
# org: [1,2,3], seqs: [[1,2],[1,3],[2,3]]
pair = {}
for seq in seqs:
for i in range(len(seq)):
                # The seq contains an element that does not appear in org
if seq[i] not in idx:
return False
                # Adjacent elements of the seq appear in a different order than in org
if i > 0 and idx[seq[i-1]] >= idx[seq[i]]:
return False
                # Adjacent elements of the seq are also adjacent (in the same order) in org
                if i > 0 and idx[seq[i-1]]+1 == idx[seq[i]]:
                    pair[idx[seq[i-1]]] = 1
if seq and seq[-1] == org[-1]:
pair[len(org)-1] = 1
for i in range(len(org)):
            # Some pair of adjacent elements in org never appears adjacently in seqs
if i not in pair:
return False
return True
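# Added example (not part of the original solution): quick sanity checks that mirror
# Examples 1 and 3 from the problem statement above.
if __name__ == "__main__":
    s = Solution()
    assert s.sequenceReconstruction([1, 2, 3], [[1, 2], [1, 3], [2, 3]])   # Example 3: unique
    assert not s.sequenceReconstruction([1, 2, 3], [[1, 2], [1, 3]])       # Example 1: not unique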
|