| Column | Type | Observed values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
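The records below follow this schema: each row carries repository and license metadata plus the full file text in `content`. As an illustration only, here is a minimal sketch of filtering such records with the Hugging Face `datasets` library; the dataset identifier `example-org/source-code-preview` is a placeholder assumption, not the actual dataset name.

```python
# Minimal sketch, not the official loader. Assumptions: the records are published
# as a Hugging Face dataset; "example-org/source-code-preview" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("example-org/source-code-preview", split="train", streaming=True)

for record in ds:
    # Keep permissively licensed, non-vendored, non-generated files under 100 kB.
    if (
        record["license_type"] == "permissive"
        and not record["is_vendor"]
        and not record["is_generated"]
        and record["length_bytes"] < 100_000
    ):
        print(record["repo_name"], record["path"], record["star_events_count"])
        source_text = record["content"]  # full file text, per the schema above
```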
a01bf4fdb740165c86936ed2d5025cd058dd3f36 | 96d284595c07bf3eb135f88a1a1e5fd2692293dc | /labml_db/serializer/yaml.py | f0afb684e1e52fd58864477a7b5da669068d9e55 | ["MIT"] | permissive | actuarial-tools/db | cfc3be5b027fe8127032ccecf64a295a83601eba | f6be48f8c724bf76fc6dc966cb4e13bd6501e911 | refs/heads/master | 2023-01-13T19:21:49.783395 | 2020-11-24T03:26:13 | 2020-11-24T03:26:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py |
from . import Serializer
from .utils import encode_keys, decode_keys
from ..types import ModelDict
class YamlSerializer(Serializer):
file_extension = 'yaml'
def to_string(self, data: ModelDict) -> str:
import yaml
return yaml.dump(encode_keys(data), default_flow_style=False)
def from_string(self, data: str) -> ModelDict:
import yaml
return decode_keys(yaml.load(data, Loader=yaml.FullLoader))
| ["[email protected]"] | |
6ee2ef466ca99a5b08c1d0f40939839443d1d676 | 907b3bbd44c95be1542a36feaadb6a71b724579f | /files/usr/tmp/pip-build-nyxh8e0k/google-cloud-storage/google/cloud/storage/blob.py | b409bc29afcdb29eb12aa0545a8fbc45bb46f2c6 | [] | no_license | vo0doO/com.termux | 2d8f536c1a5dbd7a091be0baf181e51f235fb941 | c97dd7b906e5ef3ec157581fd0bcadd3e3fc220e | refs/heads/master | 2020-12-24T09:40:30.612130 | 2016-11-21T07:47:25 | 2016-11-21T07:47:25 | 73,282,539 | 2 | 2 | null | 2020-07-24T21:33:03 | 2016-11-09T12:33:01 | Python | UTF-8 | Python | false | false | 38,452 | py |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Storage blobs."""
import base64
import copy
import hashlib
from io import BytesIO
from io import UnsupportedOperation
import json
import mimetypes
import os
import time
import httplib2
import six
from six.moves.urllib.parse import quote
from google.cloud._helpers import _rfc3339_to_datetime
from google.cloud._helpers import _to_bytes
from google.cloud._helpers import _bytes_to_unicode
from google.cloud.credentials import generate_signed_url
from google.cloud.exceptions import NotFound
from google.cloud.exceptions import make_exception
from google.cloud.storage._helpers import _PropertyMixin
from google.cloud.storage._helpers import _scalar_property
from google.cloud.storage.acl import ObjectACL
from google.cloud.streaming.http_wrapper import Request
from google.cloud.streaming.http_wrapper import make_api_request
from google.cloud.streaming.transfer import Download
from google.cloud.streaming.transfer import RESUMABLE_UPLOAD
from google.cloud.streaming.transfer import Upload
_API_ACCESS_ENDPOINT = 'https://storage.googleapis.com'
class Blob(_PropertyMixin):
"""A wrapper around Cloud Storage's concept of an ``Object``.
:type name: str
:param name: The name of the blob. This corresponds to the
unique path of the object in the bucket.
:type bucket: :class:`google.cloud.storage.bucket.Bucket`
:param bucket: The bucket to which this blob belongs.
:type chunk_size: int
:param chunk_size: The size of a chunk of data whenever iterating (1 MB).
This must be a multiple of 256 KB per the API
specification.
:type encryption_key: bytes
:param encryption_key:
Optional 32 byte encryption key for customer-supplied encryption.
See https://cloud.google.com/storage/docs/encryption#customer-supplied
"""
_chunk_size = None # Default value for each instance.
_CHUNK_SIZE_MULTIPLE = 256 * 1024
"""Number (256 KB, in bytes) that must divide the chunk size."""
def __init__(self, name, bucket, chunk_size=None, encryption_key=None):
super(Blob, self).__init__(name=name)
self.chunk_size = chunk_size # Check that setter accepts value.
self.bucket = bucket
self._acl = ObjectACL(self)
self._encryption_key = encryption_key
@property
def chunk_size(self):
"""Get the blob's default chunk size.
:rtype: int or ``NoneType``
:returns: The current blob's chunk size, if it is set.
"""
return self._chunk_size
@chunk_size.setter
def chunk_size(self, value):
"""Set the blob's default chunk size.
:type value: int
:param value: (Optional) The current blob's chunk size, if it is set.
:raises: :class:`ValueError` if ``value`` is not ``None`` and is not a
multiple of 256 KB.
"""
if value is not None and value % self._CHUNK_SIZE_MULTIPLE != 0:
raise ValueError('Chunk size must be a multiple of %d.' % (
self._CHUNK_SIZE_MULTIPLE,))
self._chunk_size = value
@staticmethod
def path_helper(bucket_path, blob_name):
"""Relative URL path for a blob.
:type bucket_path: str
:param bucket_path: The URL path for a bucket.
:type blob_name: str
:param blob_name: The name of the blob.
:rtype: str
:returns: The relative URL path for ``blob_name``.
"""
return bucket_path + '/o/' + quote(blob_name, safe='')
@property
def acl(self):
"""Create our ACL on demand."""
return self._acl
def __repr__(self):
if self.bucket:
bucket_name = self.bucket.name
else:
bucket_name = None
return '<Blob: %s, %s>' % (bucket_name, self.name)
@property
def path(self):
"""Getter property for the URL path to this Blob.
:rtype: str
:returns: The URL path to this Blob.
"""
if not self.name:
raise ValueError('Cannot determine path without a blob name.')
return self.path_helper(self.bucket.path, self.name)
@property
def client(self):
"""The client bound to this blob."""
return self.bucket.client
@property
def public_url(self):
"""The public URL for this blob's object.
:rtype: `string`
:returns: The public URL for this blob.
"""
return '{storage_base_url}/{bucket_name}/{quoted_name}'.format(
storage_base_url='https://storage.googleapis.com',
bucket_name=self.bucket.name,
quoted_name=quote(self.name, safe=''))
def generate_signed_url(self, expiration, method='GET',
content_type=None,
generation=None, response_disposition=None,
response_type=None, client=None, credentials=None):
"""Generates a signed URL for this blob.
.. note::
If you are on Google Compute Engine, you can't generate a signed
URL. Follow `Issue 922`_ for updates on this. If you'd like to
be able to generate a signed URL from GCE, you can use a standard
service account from a JSON file rather than a GCE service account.
.. _Issue 922: https://github.com/GoogleCloudPlatform/\
google-cloud-python/issues/922
If you have a blob that you want to allow access to for a set
amount of time, you can use this method to generate a URL that
is only valid within a certain time period.
This is particularly useful if you don't want publicly
accessible blobs, but don't want to require users to explicitly
log in.
:type expiration: int, long, datetime.datetime, datetime.timedelta
:param expiration: When the signed URL should expire.
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
:type content_type: str
:param content_type: (Optional) The content type of the object
referenced by ``resource``.
:type generation: str
:param generation: (Optional) A value that indicates which generation
of the resource to fetch.
:type response_disposition: str
:param response_disposition: (Optional) Content disposition of
responses to requests for the signed URL.
For example, to enable the signed URL
to initiate a file of ``blog.png``, use
the value
``'attachment; filename=blob.png'``.
:type response_type: str
:param response_type: (Optional) Content type of responses to requests
for the signed URL. Used to over-ride the content
type of the underlying blob/object.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: (Optional) The OAuth2 credentials to use to sign
the URL. Defaults to the credentials stored on the
client used.
:rtype: str
:returns: A signed URL you can use to access the resource
until expiration.
"""
resource = '/{bucket_name}/{quoted_name}'.format(
bucket_name=self.bucket.name,
quoted_name=quote(self.name, safe=''))
if credentials is None:
client = self._require_client(client)
credentials = client._base_connection.credentials
return generate_signed_url(
credentials, resource=resource,
api_access_endpoint=_API_ACCESS_ENDPOINT,
expiration=expiration, method=method,
content_type=content_type,
response_type=response_type,
response_disposition=response_disposition,
generation=generation)
def exists(self, client=None):
"""Determines whether or not this blob exists.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: bool
:returns: True if the blob exists in Cloud Storage.
"""
client = self._require_client(client)
try:
# We only need the status code (200 or not) so we seek to
# minimize the returned payload.
query_params = {'fields': 'name'}
# We intentionally pass `_target_object=None` since fields=name
# would limit the local properties.
client._connection.api_request(
method='GET', path=self.path,
query_params=query_params, _target_object=None)
# NOTE: This will not fail immediately in a batch. However, when
# Batch.finish() is called, the resulting `NotFound` will be
# raised.
return True
except NotFound:
return False
def delete(self, client=None):
"""Deletes a blob from Cloud Storage.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: :class:`Blob`
:returns: The blob that was just deleted.
:raises: :class:`google.cloud.exceptions.NotFound`
(propagated from
:meth:`google.cloud.storage.bucket.Bucket.delete_blob`).
"""
return self.bucket.delete_blob(self.name, client=client)
def download_to_file(self, file_obj, client=None):
"""Download the contents of this blob into a file-like object.
.. note::
If the server-set property, :attr:`media_link`, is not yet
initialized, makes an additional API request to load it.
Downloading a file that has been encrypted with a `customer-supplied`_
encryption key::
>>> from google.cloud import storage
>>> from google.cloud.storage import Blob
>>> client = storage.Client(project='my-project')
>>> bucket = client.get_bucket('my-bucket')
>>> encryption_key = 'aa426195405adee2c8081bb9e7e74b19'
>>> blob = Blob('secure-data', bucket,
... encryption_key=encryption_key)
>>> with open('/tmp/my-secure-file', 'wb') as file_obj:
>>> blob.download_to_file(file_obj)
The ``encryption_key`` should be a str or bytes with a length of at
least 32.
.. _customer-supplied: https://cloud.google.com/storage/docs/\
encryption#customer-supplied
:type file_obj: file
:param file_obj: A file handle to which to write the blob's data.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :class:`google.cloud.exceptions.NotFound`
"""
client = self._require_client(client)
if self.media_link is None: # not yet loaded
self.reload()
download_url = self.media_link
# Use apitools 'Download' facility.
download = Download.from_stream(file_obj)
if self.chunk_size is not None:
download.chunksize = self.chunk_size
headers = _get_encryption_headers(self._encryption_key)
request = Request(download_url, 'GET', headers)
# Use ``_base_connection`` rather ``_connection`` since the current
# connection may be a batch. A batch wraps a client's connection,
# but does not store the ``http`` object. The rest (API_BASE_URL and
# build_api_url) are also defined on the Batch class, but we just
# use the wrapped connection since it has all three (http,
# API_BASE_URL and build_api_url).
download.initialize_download(request, client._base_connection.http)
def download_to_filename(self, filename, client=None):
"""Download the contents of this blob into a named file.
:type filename: str
:param filename: A filename to be passed to ``open``.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :class:`google.cloud.exceptions.NotFound`
"""
with open(filename, 'wb') as file_obj:
self.download_to_file(file_obj, client=client)
mtime = time.mktime(self.updated.timetuple())
os.utime(file_obj.name, (mtime, mtime))
def download_as_string(self, client=None):
"""Download the contents of this blob as a string.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: bytes
:returns: The data stored in this blob.
:raises: :class:`google.cloud.exceptions.NotFound`
"""
string_buffer = BytesIO()
self.download_to_file(string_buffer, client=client)
return string_buffer.getvalue()
@staticmethod
def _check_response_error(request, http_response):
"""Helper for :meth:`upload_from_file`."""
info = http_response.info
status = int(info['status'])
if not 200 <= status < 300:
faux_response = httplib2.Response({'status': status})
raise make_exception(faux_response, http_response.content,
error_info=request.url)
# pylint: disable=too-many-locals
def upload_from_file(self, file_obj, rewind=False, size=None,
content_type=None, num_retries=6, client=None):
"""Upload the contents of this blob from a file-like object.
The content type of the upload will either be
- The value passed in to the function (if any)
- The value stored on the current blob
- The default value of 'application/octet-stream'
.. note::
The effect of uploading to an existing blob depends on the
"versioning" and "lifecycle" policies defined on the blob's
bucket. In the absence of those policies, upload will
overwrite any existing contents.
See the `object versioning
<https://cloud.google.com/storage/docs/object-versioning>`_ and
`lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
API documents for details.
Uploading a file with a `customer-supplied`_ encryption key::
>>> from google.cloud import storage
>>> from google.cloud.storage import Blob
>>> client = storage.Client(project='my-project')
>>> bucket = client.get_bucket('my-bucket')
>>> encryption_key = 'aa426195405adee2c8081bb9e7e74b19'
>>> blob = Blob('secure-data', bucket,
... encryption_key=encryption_key)
>>> with open('my-file', 'rb') as my_file:
>>> blob.upload_from_file(my_file)
The ``encryption_key`` should be a str or bytes with a length of at
least 32.
.. _customer-supplied: https://cloud.google.com/storage/docs/\
encryption#customer-supplied
:type file_obj: file
:param file_obj: A file handle open for reading.
:type rewind: bool
:param rewind: If True, seek to the beginning of the file handle before
writing the file to Cloud Storage.
:type size: int
:param size: The number of bytes to read from the file handle.
If not provided, we'll try to guess the size using
:func:`os.fstat`. (If the file handle is not from the
filesystem this won't be possible.)
:type content_type: str
:param content_type: Optional type of content being uploaded.
:type num_retries: int
:param num_retries: Number of upload retries. Defaults to 6.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :class:`ValueError` if size is not passed in and can not be
determined; :class:`google.cloud.exceptions.GoogleCloudError`
if the upload response returns an error status.
"""
client = self._require_client(client)
# Use ``_base_connection`` rather ``_connection`` since the current
# connection may be a batch. A batch wraps a client's connection,
# but does not store the ``http`` object. The rest (API_BASE_URL and
# build_api_url) are also defined on the Batch class, but we just
# use the wrapped connection since it has all three (http,
# API_BASE_URL and build_api_url).
connection = client._base_connection
content_type = (content_type or self._properties.get('contentType') or
'application/octet-stream')
# Rewind the file if desired.
if rewind:
file_obj.seek(0, os.SEEK_SET)
# Get the basic stats about the file.
total_bytes = size
if total_bytes is None:
if hasattr(file_obj, 'fileno'):
try:
total_bytes = os.fstat(file_obj.fileno()).st_size
except (OSError, UnsupportedOperation):
pass # Assuming fd is not an actual file (maybe socket).
headers = {
'Accept': 'application/json',
'Accept-Encoding': 'gzip, deflate',
'User-Agent': connection.USER_AGENT,
}
headers.update(_get_encryption_headers(self._encryption_key))
upload = Upload(file_obj, content_type, total_bytes,
auto_transfer=False)
if self.chunk_size is not None:
upload.chunksize = self.chunk_size
if total_bytes is None:
upload.strategy = RESUMABLE_UPLOAD
elif total_bytes is None:
raise ValueError('total bytes could not be determined. Please '
'pass an explicit size, or supply a chunk size '
'for a streaming transfer.')
url_builder = _UrlBuilder(bucket_name=self.bucket.name,
object_name=self.name)
upload_config = _UploadConfig()
# Temporary URL, until we know simple vs. resumable.
base_url = connection.API_BASE_URL + '/upload'
upload_url = connection.build_api_url(api_base_url=base_url,
path=self.bucket.path + '/o')
# Use apitools 'Upload' facility.
request = Request(upload_url, 'POST', headers)
upload.configure_request(upload_config, request, url_builder)
query_params = url_builder.query_params
base_url = connection.API_BASE_URL + '/upload'
request.url = connection.build_api_url(api_base_url=base_url,
path=self.bucket.path + '/o',
query_params=query_params)
upload.initialize_upload(request, connection.http)
if upload.strategy == RESUMABLE_UPLOAD:
http_response = upload.stream_file(use_chunks=True)
else:
http_response = make_api_request(connection.http, request,
retries=num_retries)
self._check_response_error(request, http_response)
response_content = http_response.content
if not isinstance(response_content,
six.string_types): # pragma: NO COVER Python3
response_content = response_content.decode('utf-8')
self._set_properties(json.loads(response_content))
# pylint: enable=too-many-locals
def upload_from_filename(self, filename, content_type=None, client=None):
"""Upload this blob's contents from the content of a named file.
The content type of the upload will either be
- The value passed in to the function (if any)
- The value stored on the current blob
- The value given by mimetypes.guess_type
.. note::
The effect of uploading to an existing blob depends on the
"versioning" and "lifecycle" policies defined on the blob's
bucket. In the absence of those policies, upload will
overwrite any existing contents.
See the `object versioning
<https://cloud.google.com/storage/docs/object-versioning>`_ and
`lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
API documents for details.
:type filename: str
:param filename: The path to the file.
:type content_type: str
:param content_type: Optional type of content being uploaded.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
content_type = content_type or self._properties.get('contentType')
if content_type is None:
content_type, _ = mimetypes.guess_type(filename)
with open(filename, 'rb') as file_obj:
self.upload_from_file(
file_obj, content_type=content_type, client=client)
def upload_from_string(self, data, content_type='text/plain', client=None):
"""Upload contents of this blob from the provided string.
.. note::
The effect of uploading to an existing blob depends on the
"versioning" and "lifecycle" policies defined on the blob's
bucket. In the absence of those policies, upload will
overwrite any existing contents.
See the `object versioning
<https://cloud.google.com/storage/docs/object-versioning>`_ and
`lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
API documents for details.
:type data: bytes or text
:param data: The data to store in this blob. If the value is
text, it will be encoded as UTF-8.
:type content_type: str
:param content_type: Optional type of content being uploaded. Defaults
to ``'text/plain'``.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
if isinstance(data, six.text_type):
data = data.encode('utf-8')
string_buffer = BytesIO()
string_buffer.write(data)
self.upload_from_file(
file_obj=string_buffer, rewind=True, size=len(data),
content_type=content_type, client=client)
def make_public(self, client=None):
"""Make this blob public giving all users read access.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
self.acl.all().grant_read()
self.acl.save(client=client)
def compose(self, sources, client=None):
"""Concatenate source blobs into this one.
:type sources: list of :class:`Blob`
:param sources: blobs whose contents will be composed into this blob.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :exc:`ValueError` if this blob does not have its
:attr:`content_type` set.
"""
if self.content_type is None:
raise ValueError("Destination 'content_type' not set.")
client = self._require_client(client)
request = {
'sourceObjects': [{'name': source.name} for source in sources],
'destination': self._properties.copy(),
}
api_response = client._connection.api_request(
method='POST', path=self.path + '/compose', data=request,
_target_object=self)
self._set_properties(api_response)
def rewrite(self, source, token=None, client=None):
"""Rewrite source blob into this one.
:type source: :class:`Blob`
:param source: blob whose contents will be rewritten into this blob.
:type token: str
:param token: Optional. Token returned from an earlier, not-completed
call to rewrite the same source blob. If passed,
result will include updated status, total bytes written.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: tuple
:returns: ``(token, bytes_rewritten, total_bytes)``, where ``token``
is a rewrite token (``None`` if the rewrite is complete),
``bytes_rewritten`` is the number of bytes rewritten so far,
and ``total_bytes`` is the total number of bytes to be
rewritten.
"""
client = self._require_client(client)
headers = _get_encryption_headers(self._encryption_key)
headers.update(_get_encryption_headers(
source._encryption_key, source=True))
if token:
query_params = {'rewriteToken': token}
else:
query_params = {}
api_response = client._connection.api_request(
method='POST', path=source.path + '/rewriteTo' + self.path,
query_params=query_params, data=self._properties, headers=headers,
_target_object=self)
self._set_properties(api_response['resource'])
rewritten = int(api_response['totalBytesRewritten'])
size = int(api_response['objectSize'])
if api_response['done']:
return None, rewritten, size
return api_response['rewriteToken'], rewritten, size
cache_control = _scalar_property('cacheControl')
"""HTTP 'Cache-Control' header for this object.
See: https://tools.ietf.org/html/rfc7234#section-5.2 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_disposition = _scalar_property('contentDisposition')
"""HTTP 'Content-Disposition' header for this object.
See: https://tools.ietf.org/html/rfc6266 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_encoding = _scalar_property('contentEncoding')
"""HTTP 'Content-Encoding' header for this object.
See: https://tools.ietf.org/html/rfc7231#section-3.1.2.2 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_language = _scalar_property('contentLanguage')
"""HTTP 'Content-Language' header for this object.
See: http://tools.ietf.org/html/bcp47 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_type = _scalar_property('contentType')
"""HTTP 'Content-Type' header for this object.
See: https://tools.ietf.org/html/rfc2616#section-14.17 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
crc32c = _scalar_property('crc32c')
"""CRC32C checksum for this object.
See: http://tools.ietf.org/html/rfc4960#appendix-B and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
@property
def component_count(self):
"""Number of underlying components that make up this object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: int or ``NoneType``
:returns: The component count (in case of a composed object) or
``None`` if the property is not set locally. This property
will not be set on objects not created via ``compose``.
"""
component_count = self._properties.get('componentCount')
if component_count is not None:
return int(component_count)
@property
def etag(self):
"""Retrieve the ETag for the object.
See: http://tools.ietf.org/html/rfc2616#section-3.11 and
https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: str or ``NoneType``
:returns: The blob etag or ``None`` if the property is not set locally.
"""
return self._properties.get('etag')
@property
def generation(self):
"""Retrieve the generation for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: int or ``NoneType``
:returns: The generation of the blob or ``None`` if the property
is not set locally.
"""
generation = self._properties.get('generation')
if generation is not None:
return int(generation)
@property
def id(self):
"""Retrieve the ID for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: str or ``NoneType``
:returns: The ID of the blob or ``None`` if the property is not
set locally.
"""
return self._properties.get('id')
md5_hash = _scalar_property('md5Hash')
"""MD5 hash for this object.
See: http://tools.ietf.org/html/rfc4960#appendix-B and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
@property
def media_link(self):
"""Retrieve the media download URI for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: str or ``NoneType``
:returns: The media link for the blob or ``None`` if the property is
not set locally.
"""
return self._properties.get('mediaLink')
@property
def metadata(self):
"""Retrieve arbitrary/application specific metadata for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: dict or ``NoneType``
:returns: The metadata associated with the blob or ``None`` if the
property is not set locally.
"""
return copy.deepcopy(self._properties.get('metadata'))
@metadata.setter
def metadata(self, value):
"""Update arbitrary/application specific metadata for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:type value: dict
:param value: (Optional) The blob metadata to set.
"""
self._patch_property('metadata', value)
@property
def metageneration(self):
"""Retrieve the metageneration for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: int or ``NoneType``
:returns: The metageneration of the blob or ``None`` if the property
is not set locally.
"""
metageneration = self._properties.get('metageneration')
if metageneration is not None:
return int(metageneration)
@property
def owner(self):
"""Retrieve info about the owner of the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: dict or ``NoneType``
:returns: Mapping of owner's role/ID. If the property is not set
locally, returns ``None``.
"""
return copy.deepcopy(self._properties.get('owner'))
@property
def self_link(self):
"""Retrieve the URI for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: str or ``NoneType``
:returns: The self link for the blob or ``None`` if the property is
not set locally.
"""
return self._properties.get('selfLink')
@property
def size(self):
"""Size of the object, in bytes.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: int or ``NoneType``
:returns: The size of the blob or ``None`` if the property
is not set locally.
"""
size = self._properties.get('size')
if size is not None:
return int(size)
@property
def storage_class(self):
"""Retrieve the storage class for the object.
See: https://cloud.google.com/storage/docs/storage-classes
:rtype: str or ``NoneType``
:returns: If set, one of "MULTI_REGIONAL", "REGIONAL",
"NEARLINE", "COLDLINE", "STANDARD", or
"DURABLE_REDUCED_AVAILABILITY", else ``None``.
"""
return self._properties.get('storageClass')
@property
def time_deleted(self):
"""Retrieve the timestamp at which the object was deleted.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: :class:`datetime.datetime` or ``NoneType``
:returns: Datetime object parsed from RFC3339 valid timestamp, or
``None`` if the property is not set locally. If the blob has
not been deleted, this will never be set.
"""
value = self._properties.get('timeDeleted')
if value is not None:
return _rfc3339_to_datetime(value)
@property
def updated(self):
"""Retrieve the timestamp at which the object was updated.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: :class:`datetime.datetime` or ``NoneType``
:returns: Datetime object parsed from RFC3339 valid timestamp, or
``None`` if the property is not set locally.
"""
value = self._properties.get('updated')
if value is not None:
return _rfc3339_to_datetime(value)
class _UploadConfig(object):
"""Faux message FBO apitools' 'configure_request'.
Values extracted from apitools
'samples/storage_sample/storage/storage_v1_client.py'
"""
accept = ['*/*']
max_size = None
resumable_multipart = True
resumable_path = u'/resumable/upload/storage/v1/b/{bucket}/o'
simple_multipart = True
simple_path = u'/upload/storage/v1/b/{bucket}/o'
class _UrlBuilder(object):
"""Faux builder FBO apitools' 'configure_request'"""
def __init__(self, bucket_name, object_name):
self.query_params = {'name': object_name}
self._bucket_name = bucket_name
self._relative_path = ''
def _get_encryption_headers(key, source=False):
"""Builds customer encryption key headers
:type key: bytes
:param key: 32 byte key to build request key and hash.
:type source: bool
:param source: If true, return headers for the "source" blob; otherwise,
return headers for the "destination" blob.
:rtype: dict
:returns: dict of HTTP headers being sent in request.
"""
if key is None:
return {}
key = _to_bytes(key)
key_hash = hashlib.sha256(key).digest()
key_hash = base64.b64encode(key_hash).rstrip()
key = base64.b64encode(key).rstrip()
if source:
prefix = 'X-Goog-Copy-Source-Encryption-'
else:
prefix = 'X-Goog-Encryption-'
return {
prefix + 'Algorithm': 'AES256',
prefix + 'Key': _bytes_to_unicode(key),
prefix + 'Key-Sha256': _bytes_to_unicode(key_hash),
}
| ["[email protected]"] | |
b6540baaefcf2ad5c0dc65ec15d031a6d31b70a0 | ec1059f4ccea10deb2cb8fd7f9458700a5e6ca4c | /venv/Lib/site-packages/qiskit/test/mock/backends/cambridge/fake_cambridge.py | 9391cbac2bdaa8760fd1c54ad790775b685e29f5 | ["Apache-2.0", "MIT"] | permissive | shivam675/Quantum-CERN | b60c697a3a7ad836b3653ee9ce3875a6eafae3ba | ce02d9198d9f5a1aa828482fea9b213a725b56bb | refs/heads/main | 2023-01-06T20:07:15.994294 | 2020-11-13T10:01:38 | 2020-11-13T10:01:38 | 330,435,191 | 1 | 0 | MIT | 2021-01-17T16:29:26 | 2021-01-17T16:29:25 | null | UTF-8 | Python | false | false | 2,107 | py |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Fake Cambridge device (20 qubit).
"""
import os
import json
from qiskit.providers.models import QasmBackendConfiguration, BackendProperties
from qiskit.test.mock.fake_backend import FakeBackend
class FakeCambridge(FakeBackend):
"""A fake Cambridge backend."""
def __init__(self):
"""
00 ↔ 01 ↔ 02 ↔ 03 ↔ 04
↕ ↕
05 06
↕ ↕
07 ↔ 08 ↔ 09 ↔ 10 ↔ 11 ↔ 12 ↔ 13 ↔ 14 ↔ 15
↕ ↕ ↕
16 17 18
↕ ↕ ↕
19 ↔ 20 ↔ 21 ↔ 22 ↔ 23 ↔ 24 ↔ 25 ↔ 26 ↔ 27
"""
dirname = os.path.dirname(__file__)
filename = "conf_cambridge.json"
with open(os.path.join(dirname, filename)) as f_conf:
conf = json.load(f_conf)
configuration = QasmBackendConfiguration.from_dict(conf)
configuration.backend_name = 'fake_cambridge'
self._defaults = None
self._properties = None
super().__init__(configuration)
def properties(self):
"""Returns a snapshot of device properties"""
if not self._properties:
dirname = os.path.dirname(__file__)
filename = "props_cambridge.json"
with open(os.path.join(dirname, filename)) as f_prop:
props = json.load(f_prop)
self._properties = BackendProperties.from_dict(props)
return self._properties
| ["[email protected]"] | |
3484100f82f3f01cf5341d95128ee9886dc4357f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/3642.py | 933226fea834fae98b48a12ad1623499f69186ae | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py |
import sys
def func(arr, w):
s = [0]*len(arr)
sum = ans = 0
for i in range(len(arr)):
s[i] += (arr[i]+sum)%2 != 1
sum += s[i]- (s[i-w+1] if (i>= w-1) else 0)
ans += s[i]
if(i > len(arr)-w and s[i] != 0):
return 'IMPOSSIBLE'
return str(ans)
fname = 'input2.txt'
with open(fname) as f:
content = f.readlines()
lines = content[1:]
orig_stdout = sys.stdout
fout = open('out1.txt', 'w')
sys.stdout = fout
for j,line in enumerate(lines):
st, w = line.split()
arr = [(1 if i =='+' else 0) for i in st]
print 'Case #'+str(j+1)+': '+func(arr, int(w))
sys.stdout = orig_stdout
f.close()
fout.close()
| ["[email protected]"] | |
3ae157fbdde69ca5497d9b5eae0d633b914024c0 | d115cf7a1b374d857f6b094d4b4ccd8e9b1ac189 | /pyplusplus_dev/pyplusplus/creators_factory/types_database.py | 50a37df35aa71405c160cd1912c6385cde48b2e0 | ["BSL-1.0"] | permissive | gatoatigrado/pyplusplusclone | 30af9065fb6ac3dcce527c79ed5151aade6a742f | a64dc9aeeb718b2f30bd6a5ff8dcd8bfb1cd2ede | refs/heads/master | 2016-09-05T23:32:08.595261 | 2010-05-16T10:53:45 | 2010-05-16T10:53:45 | 700,369 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,458 | py |
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from pyplusplus import messages
from pygccxml import declarations
from pyplusplus import code_creators
from pyplusplus import _logging_
templates = declarations.templates
class types_database_t( object ):
def __init__( self ):
object.__init__( self )
self.__variables = {} # decl_string : [type]
self.__return_types = {} # decl_string : [type]
self.__arguments_types = {} #decl_string : [type]
self.__smart_ptrs = [ 'shared_ptr', 'auto_ptr' ]
self.__fundamental_strs = declarations.FUNDAMENTAL_TYPES.keys()
self.__normalize_data = [ ',', '<', '>', '*', '&', '(', ')', '::' ]
self.__containers = set()
def update_containers( self, decl ):
assert decl.indexing_suite
self.__containers.add( decl )
def update( self, decl ):
if isinstance( decl, declarations.calldef_t ):
if not isinstance( decl, declarations.constructor_t ):
self._update_db( self.__return_types, decl.return_type )
map( lambda arg: self._update_db( self.__arguments_types, arg.type )
, decl.arguments )
elif isinstance( decl, declarations.variable_t ):
self._update_db( self.__variables, decl.type )
else:
assert not "types_database_t class can not process " + str( decl )
def _is_relevant(self, decl_string):
for smart_ptr in self.__smart_ptrs:
if smart_ptr in decl_string:
return True
return False
def _is_relevant_inst( self, name, args ):
return self._is_relevant( name )
def _normalize( self, decl_string ):
if decl_string.startswith( '::' ):
decl_string = decl_string[2:]
answer = decl_string
for data in self.__normalize_data:
answer = answer.replace( data + ' ', data )
answer = answer.replace( ' ' + data, data )
return answer.replace( ' ', ' ' )
def _update_containers_db( self, type_ ):
#will return True is type was treated
type_ = declarations.remove_alias( type_ )
type_ = declarations.remove_pointer( type_ )
type_ = declarations.remove_reference( type_ )
type_ = declarations.remove_cv( type_ )
type_ = declarations.remove_declarated( type_ )
class_traits = declarations.class_traits
class_declaration_traits = declarations.class_declaration_traits
if not class_traits.is_my_case( type_ ) and not class_declaration_traits.is_my_case( type_ ):
return False
if class_traits.is_my_case( type_ ):
container_cls = class_traits.get_declaration( type_ )
else:
container_cls = class_declaration_traits.get_declaration( type_ )
if None is container_cls.indexing_suite:
return False
try:
#check extraction of element type from container
container_cls.indexing_suite.element_type
except RuntimeError:
decls_logger = _logging_.loggers.declarations
if not messages.filter_disabled_msgs([messages.W1042], container_cls.disabled_messages ):
return #user disabled property warning
decls_logger.warn( "%s;%s" % ( container_cls, messages.W1042 ) )
self.__containers.add( container_cls )
return True
def _update_db( self, db, type_ ):
if self._update_containers_db( type_ ):
return
decl_string = self._normalize( declarations.base_type( type_ ).decl_string )
if not templates.is_instantiation( decl_string ):
return
if not self._is_relevant( decl_string ):
return
insts = filter( lambda inst: self._is_relevant_inst( inst[0], inst[1] )
, templates.split_recursive( decl_string ) )
for smart_ptr, args in insts:
assert len( args ) == 1
pointee = self._normalize( args[0] )
if not db.has_key(pointee):
db[ pointee ] = []
smart_ptr = self._normalize( smart_ptr )
if (smart_ptr, type_) not in db[pointee]:
db[ pointee ].append( (smart_ptr, type_) )
def _find_smart_ptrs( self, db, class_decl ):
decl_string = self._normalize( class_decl.decl_string )
if db.has_key( decl_string ):
return db[ decl_string ]
else:
return None
def create_holder( self, class_decl ):
#holder should be created when we pass object created in python
#as parameter to function in C++ that takes the smart pointer by reference
found = self._find_smart_ptrs( self.__arguments_types, class_decl )
if not found:
return None#not found or ambiguty
held_type = None
for smart_ptr, type_ in found:
if declarations.is_reference( type_ ) and not declarations.is_const( type_.base ):
temp = code_creators.held_type_t( smart_ptr=smart_ptr )
if not held_type or 'shared_ptr' in smart_ptr:
held_type = temp
return held_type
def _create_registrators_from_db( self, db, class_creator, registered ):
spregistrator_t = code_creators.smart_pointer_registrator_t
found = self._find_smart_ptrs( db, class_creator.declaration )
if not found:
return
for smart_ptr, type_ in found:
already_registered = filter( lambda registrator: registrator.smart_ptr == smart_ptr
, registered )
if not already_registered:
registered.append( spregistrator_t( smart_ptr=smart_ptr, class_creator=class_creator) )
def create_registrators( self, class_creator ):
"""
looks for all places where the class may be used as smart pointer.
If found, then creates :class:`code_creators.smart_pointer_registrator_t`
for that class and pointer type.
"""
spconverter_t = code_creators.smart_pointers_converter_t
registrators = []
dbs = [ self.__arguments_types, self.__return_types, self.__variables ]
for db in dbs:
self._create_registrators_from_db( db, class_creator, registrators )
if not class_creator.declaration.bases:
return registrators
# Add implicit converters from me to base classes and from derived classes to me
answer = []
for registrator in registrators:
answer.append( registrator )
decl = registrator.declaration
for hierarchy_info in decl.recursive_bases:
if hierarchy_info.access_type != declarations.ACCESS_TYPES.PRIVATE:
converter = spconverter_t( smart_ptr=registrator.smart_ptr
, source=class_creator.declaration
, target=hierarchy_info.related_class )
answer.append( converter )
for hierarchy_info in decl.recursive_derived:
if hierarchy_info.access_type != declarations.ACCESS_TYPES.PRIVATE:
converter = spconverter_t( smart_ptr=registrator.smart_ptr
, source=hierarchy_info.related_class
, target=class_creator.declaration )
answer.append( converter )
return answer
def _print_single_db(self, db):
for decl_string in db.keys():
print 'decl_string : ', decl_string
for smart_ptr, type_ in db[ decl_string ]:
print ' smart_ptr : ', smart_ptr
print ' type_ : ', type_.decl_string
def print_db( self ):
dbs = [ self.__arguments_types, self.__return_types, self.__variables ]
for db in dbs:
self._print_single_db( db )
def _get_used_containers( self ):
return self.__containers
used_containers = property( _get_used_containers)
| ["roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76"] | roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76 |
547a53c86ba8b4cd0d676928ceea1cd8c29a5eb4 | 157d46852ea5788f1c7153b3ce10afb71d5a34c5 | /setup.py | d80136964004e5a4fc17965a395deeebde568d6d | [] | no_license | rctk/rctk.qx | 4475e0e0918df16a1d961a591d5d5d403a59ec23 | c1d510b2cd4404b5b9897e18b33c3d5b80813608 | refs/heads/master | 2020-05-19T13:50:17.871325 | 2011-09-13T20:16:51 | 2011-09-13T20:16:51 | 2,898,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py |
from setuptools import setup, find_packages
import os
version = '1.0'
setup(name='rctk.qx',
version=version,
description="Qooxdoo frontend for RCTK",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
],
keywords='',
author='Ivo van der Wijk',
author_email='[email protected]',
url='http://rctk.org/',
license='BSD',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['rctk'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'rctk',
],
entry_points="""
# -*- Entry points: -*-
""",
)
| ["[email protected]"] | |
a4ab3dfa2ec84f7f290cefd5efc56c798f2baf7b | 5fd4707876cac0a4ca3b14af9a936301c45b5599 | /03_字典和集合/fp_07_StrKeyDict0在查询的时候把非字符串的键转换为字符串.py | 0f65f3920a1e86a92cadfe6eba73b40dbd5364f7 | [] | no_license | xuelang201201/FluentPython | 5b0d89bfc6ee1238ad77db9955ec7e8417b418b8 | 7cbedf7c780c2a9e0edac60484f2ad4c385e1dbd | refs/heads/master | 2022-04-26T21:49:16.923214 | 2020-04-27T01:27:50 | 2020-04-27T01:27:50 | 258,290,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py |
class StrKeyDict0(dict):  # StrKeyDict0 inherits from dict.
    def __missing__(self, key):
        if isinstance(key, str):  # If the missing key is itself a string, raise KeyError.
            raise KeyError(key)
        return self[str(key)]  # If the missing key is not a string, convert it to a string and look it up again.
    def get(self, key, default=None):
        try:
            # get delegates the lookup to __getitem__ via self[key], which gives
            # __missing__ a chance to handle the key before a failure is reported.
            return self[key]
        except KeyError:
            # If KeyError is raised here, __missing__ also failed, so return default.
            return default
    def __contains__(self, key):
        # First look up the key as given (the mapping may contain non-string keys);
        # if it is not found, convert it with str() and look it up once more.
        return key in self.keys() or str(key) in self.keys()
d = StrKeyDict0([('2', 'two'), ('4', 'four')])
print(d['2'])
print(d[4])
# print(d[1])
print(d.get('2'))
print(d.get('4'))
print(d.get(1, 'N/A'))
print(2 in d)
print(1 in d)
| ["[email protected]"] | |
91840a9b9599ca25ad5162852ade91602d46b6a3 | 9740b4cfc46b924c2e430472ce0522fa35813465 | /django_lender/settings.py | 87c2c6eaeeae5c3144315c44ebe1535b2a0dd0dd | ["MIT"] | permissive | madelinepet/django_lender | d99f176b76d245ebcc0032f3304d677cd7a4fcfe | 5c6c342e8306e848e93db31a02efe9ef1299a401 | refs/heads/master | 2020-03-28T21:48:08.933512 | 2018-10-03T23:03:01 | 2018-10-03T23:03:01 | 149,183,702 | 0 | 0 | MIT | 2018-10-03T23:02:57 | 2018-09-17T20:26:20 | JavaScript | UTF-8 | Python | false | false | 4,217 | py |
"""
Django settings for django_lender project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# If no secret key, do not run app.
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# Defaults to False
DEBUG = os.environ.get('DEBUG', False)
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '').split()
# Application definition
INSTALLED_APPS = [
'django_lender',
'lender_books',
'django.contrib.admin',
'django.contrib.auth',
'django_registration',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_lender.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_lender.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME', 'test_books'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST', 'localhost'),
'PORT': os.environ.get('DB_PORT', 5432),
'TEST': {
'NAME': 'lender_test'
}
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# CUSTOM SETTINGS GO DOWN HERE
# os.path business gets you to the root of your directory. For production!
# (While debug = True, django doesn't use static root)
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Image Upload settings
MEDIA_ROOT = '/static-assets/'
MEDIA_URL = '/static-assets/'
# Django Registration settings
ACCOUNT_ACTIVATION_DAYS = 1
LOGIN_REDIRECT_URL = '/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
| ["[email protected]"] | |
ab7516a56c5ad7146d01832178bc747e367073e6 | d6acd0cdfeba9d4777ae897e9edd225e5317d254 | /donate_anything/item/migrations/0001_initial.py | 03277750c0f3544c629f4322043237e2706ebdaf | ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"] | permissive | fossabot/Donate-Anything | 680be213d90888bdf565e22214c1267fa5a8b84c | 335ef00f3d8bf76b8827d337b68c5ba0fc99bc64 | refs/heads/master | 2022-12-06T18:37:01.398381 | 2020-08-23T02:01:26 | 2020-08-23T02:01:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,315 | py |
# Generated by Django 3.0.8 on 2020-07-17 23:12
import django.contrib.postgres.fields
import django.contrib.postgres.indexes
import django.db.models.deletion
from django.conf import settings
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("charity", "0001_initial"),
]
operations = [
TrigramExtension(),
migrations.CreateModel(
name="Item",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("name", models.CharField(max_length=100, unique=True, db_index=True)),
("image", models.ImageField(blank=True, null=True, upload_to="")),
("is_appropriate", models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name="WantedItem",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
(
"charity",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="charity.Charity",
),
),
(
"item",
models.ForeignKey(
db_index=False,
on_delete=django.db.models.deletion.CASCADE,
to="item.Item",
),
),
],
),
migrations.CreateModel(
name="ProposedItem",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
(
"item",
django.contrib.postgres.fields.ArrayField(
base_field=models.BigIntegerField(), size=10000, default=list
),
),
(
"names",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=100),
size=1000,
default=list,
),
),
("closed", models.BooleanField(default=False)),
(
"entity",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="charity.Charity",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Category",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
(
"category",
models.PositiveSmallIntegerField(
choices=[
(0, "Financial"),
(1, "Clothing"),
(2, "Kitchenware"),
(3, "Books and Media"),
(4, "Toys and Games"),
(5, "Art"),
(6, "Hygiene"),
(7, "Sports"),
(8, "Furniture"),
(9, "Electronics"),
(10, "Internal Health"),
(11, "School Supplies"),
(12, "Linen"),
(13, "Recyclables"),
(14, "Compost"),
(15, "Food and Liquids"),
(16, "Miscellaneous"),
]
),
),
(
"charity",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="charity.Charity",
),
),
],
),
migrations.AddIndex(
model_name="item",
index=django.contrib.postgres.indexes.GinIndex(
fields=["name"],
name="item_name_sim_gin_index",
opclasses=("gin_trgm_ops",),
),
),
migrations.AddIndex(
model_name="wanteditem",
index=django.contrib.postgres.indexes.BrinIndex(
fields=["item"], name="item_wanted_item_id_b5f5d9_brin"
),
),
migrations.AddConstraint(
model_name="category",
constraint=models.UniqueConstraint(
fields=("charity", "category"), name="charity_supports_category"
),
),
migrations.AddConstraint(
model_name="wanteditem",
constraint=models.UniqueConstraint(
fields=("charity", "item"), name="charity_need_item"
),
),
]
| ["[email protected]"] | |
65024f94ef36f76cc69ae7f6045e4016d5ab2984 | 71b8b60c5627ace1bbda39f679f93f60b55543ca | /tensorflow_federated/python/common_libs/golden.py | c9ac9996e2e44a95a545046401cfb6d448d6ab15 | ["Apache-2.0"] | permissive | tensorflow/federated | ff94b63e9f4af448795bae77cee5b627dcae9051 | ad4bca66f4b483e09d8396e9948630813a343d27 | refs/heads/main | 2023-08-31T11:46:28.559047 | 2023-08-31T02:04:38 | 2023-08-31T02:09:59 | 161,556,784 | 2,297 | 631 | Apache-2.0 | 2023-09-13T22:54:14 | 2018-12-12T23:15:35 | Python | UTF-8 | Python | false | false | 5,056 | py |
## Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for asserting against golden files."""
import contextlib
import difflib
import io
import os.path
import re
import traceback
from typing import Optional
from absl import flags
_GOLDEN = flags.DEFINE_multi_string(
'golden', [], 'List of golden files available.'
)
_UPDATE_GOLDENS = flags.DEFINE_bool(
'update_goldens', False, 'Set true to update golden files.'
)
_VERBOSE = flags.DEFINE_bool(
'verbose', False, 'Set true to show golden diff output.'
)
FLAGS = flags.FLAGS
class MismatchedGoldenError(RuntimeError):
pass
_filename_to_golden_map: Optional[dict[str, str]] = None
def _filename_to_golden_path(filename: str) -> str:
"""Retrieve the `--golden` path flag for a golden file named `filename`."""
# Parse out all of the golden files once
global _filename_to_golden_map
if _filename_to_golden_map is None:
_filename_to_golden_map = {}
for golden_path in _GOLDEN.value:
name = os.path.basename(golden_path)
old_path = _filename_to_golden_map.get(name)
if old_path is not None and old_path != golden_path:
raise RuntimeError(
f'Multiple golden files provided for filename {name}:\n'
f'{old_path} and\n'
f'{golden_path}\n'
'Golden file names in the same test target must be unique.'
)
_filename_to_golden_map[name] = golden_path
if filename not in _filename_to_golden_map:
raise RuntimeError(f'No `--golden` files found with filename {filename}')
return _filename_to_golden_map[filename]
def check_string(filename: str, value: str):
"""Check that `value` matches the contents of the golden file `filename`."""
# Append a newline to the end of `value` to work around lint against
# text files with no trailing newline.
if not value.endswith('\n'):
value = value + '\n'
golden_path = _filename_to_golden_path(filename)
if _UPDATE_GOLDENS.value:
with open(golden_path, 'w') as f:
f.write(value)
return
with open(golden_path, 'r') as f:
golden_contents = f.read()
if value == golden_contents:
return
message = (
f'The contents of golden file {filename} '
'no longer match the current value.\n'
'To update the golden file, rerun this target with:\n'
'`--test_arg=--update_goldens --test_strategy=local`\n'
)
if _VERBOSE.value:
message += 'Full diff:\n'
split_value = value.split('\n')
split_golden = golden_contents.split('\n')
message += '\n'.join(difflib.unified_diff(split_golden, split_value))
else:
message += 'To see the full diff, rerun this target with:\n'
message += '`--test_arg=--verbose\n'
raise MismatchedGoldenError(message)
def traceback_string(exc_type, exc_value, tb) -> str:
"""Generates a standardized stringified version of an exception traceback."""
exception_string_io = io.StringIO()
traceback.print_exception(exc_type, exc_value, tb, file=exception_string_io)
exception_string = exception_string_io.getvalue()
# Strip path to TFF to normalize error messages
# First in filepaths.
without_filepath = re.sub(
r'\/\S*\/tensorflow_federated\/', '', exception_string
)
# Then also in class paths.
without_classpath = re.sub(
r'(\S*\.)+?(?=tensorflow_federated)', '', without_filepath
)
# Strip line numbers to avoid churn
without_linenumber = re.sub(r', line \d*', '', without_classpath)
return without_linenumber
def check_raises_traceback(
filename: str, exception: Exception
) -> contextlib.AbstractContextManager[None]:
"""Check for `exception` to be raised, generating a golden traceback."""
# Note: does not use `@contextlib.contextmanager` because that adds
# this function to the traceback.
return _TracebackManager(filename, exception)
class _TracebackManager:
"""Context manager for collecting tracebacks and comparing them to goldens."""
def __init__(self, filename, exception):
self._filename = filename
self._exception = exception
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, tb):
if not issubclass(exc_type, self._exception):
message = f'Exception `{self._exception.__name__}` was not thrown.'
if exc_value is not None:
message += f' A different exception was thrown: {exc_type.__name__}'
raise RuntimeError(message)
traceback_str = traceback_string(exc_type, exc_value, tb)
check_string(self._filename, traceback_str)
return True
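# Illustrative usage sketch (not part of this module; the filenames are hypothetical):
#   check_string('some_output.expected', actual_text)
# compares a generated string against a checked-in golden file, and
#   with check_raises_traceback('some_traceback.expected', ValueError):
#     code_under_test()
# asserts on a normalized traceback; each golden file must also be passed to the
# test target via the --golden flag handled above.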
| [
"[email protected]"
]
| |
8e9b0d12dfa10221e6bf87007325c95fd41832ef | 717558d6a075163294054bd5aea4ef3234df23ad | /models_all/smallinvDAXr3b020-022.py | 7d25b16346043224265235b0565d6453c396bb5e | [
"MIT"
]
| permissive | RomeoV/pyomo-MINLP-benchmarking | 1270766397fbc4e57ea1bd0c2285fb7edf64062d | 996d2c8ee1cb9b03fe00c6246f52294337d8b92c | refs/heads/master | 2021-07-11T17:54:25.284712 | 2020-08-13T23:43:14 | 2020-08-13T23:43:14 | 185,664,992 | 8 | 1 | MIT | 2019-05-10T19:07:05 | 2019-05-08T19:09:05 | Python | UTF-8 | Python | false | false | 18,361 | py | # MINLP written by GAMS Convert at 05/15/20 00:51:19
#
# Equation counts
# Total E G L N X C B
# 4 0 2 2 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 31 1 0 30 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 121 91 30 0
from pyomo.environ import *
model = m = ConcreteModel()
m.i1 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i2 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i4 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i5 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i6 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i7 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i8 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i9 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i10 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i11 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i12 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i13 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i14 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i15 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i16 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i17 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i18 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i19 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i20 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i21 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i22 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i23 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i24 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i25 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i26 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i27 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i28 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i29 = Var(within=Integers,bounds=(0,100),initialize=0)
m.i30 = Var(within=Integers,bounds=(0,100),initialize=0)
m.x31 = Var(within=Reals,bounds=(None,None),initialize=0)
m.obj = Objective(expr=m.x31, sense=minimize)
m.c1 = Constraint(expr=0.00558391*m.i1**2 + 0.0103737*m.i2**2 + 0.0221075*m.i3**2 + 0.00399551*m.i4**2 + 0.00267064*m.i5
**2 + 0.00516451*m.i6**2 + 0.00421051*m.i7**2 + 0.00368008*m.i8**2 + 0.00372788*m.i9**2 +
0.00668969*m.i10**2 + 0.00613034*m.i11**2 + 0.0129271*m.i12**2 + 0.00697595*m.i13**2 + 0.0104282*
m.i14**2 + 0.00899168*m.i15**2 + 0.0206685*m.i16**2 + 0.0488863*m.i17**2 + 0.00894867*m.i18**2 +
0.0124118*m.i19**2 + 0.0122291*m.i20**2 + 0.0128973*m.i21**2 + 0.00668043*m.i22**2 + 0.0153425*
m.i23**2 + 0.0128605*m.i24**2 + 0.00718697*m.i25**2 + 0.0102616*m.i26**2 + 0.0123235*m.i27**2 +
0.00569465*m.i28**2 + 0.00818114*m.i29**2 + 0.00469456*m.i30**2 + 0.00901964*m.i1*m.i2 +
0.00860972*m.i1*m.i3 + 0.00248004*m.i1*m.i4 + 0.001821292*m.i1*m.i5 + 0.00561078*m.i1*m.i6 +
0.0051287*m.i1*m.i7 + 0.000691196*m.i1*m.i8 + 0.000805982*m.i1*m.i9 + 0.00531452*m.i1*m.i10 +
0.00556768*m.i1*m.i11 + 0.00745224*m.i1*m.i12 + 0.00478224*m.i1*m.i13 + 0.00610824*m.i1*m.i14 +
0.00577634*m.i1*m.i15 + 0.00613326*m.i1*m.i16 + 0.01432596*m.i1*m.i17 + 0.007501*m.i1*m.i18 +
0.00716212*m.i1*m.i19 + 0.00512922*m.i1*m.i20 + 0.0087283*m.i1*m.i21 + 0.00245846*m.i1*m.i22 +
0.0071572*m.i1*m.i23 + 0.00543966*m.i1*m.i24 + 0.00708258*m.i1*m.i25 + 0.00243422*m.i1*m.i26 +
0.00729094*m.i1*m.i27 + 0.00386642*m.i1*m.i28 + 0.0061908*m.i1*m.i29 + 0.00366754*m.i1*m.i30 +
0.01583972*m.i2*m.i3 + 0.00394608*m.i2*m.i4 + 0.001773554*m.i2*m.i5 + 0.00861376*m.i2*m.i6 +
0.00604454*m.i2*m.i7 + 0.00312866*m.i2*m.i8 + 0.00184686*m.i2*m.i9 + 0.00924638*m.i2*m.i10 +
0.01131902*m.i2*m.i11 + 0.01253232*m.i2*m.i12 + 0.00675858*m.i2*m.i13 + 0.00804604*m.i2*m.i14 +
0.00869872*m.i2*m.i15 + 0.0094047*m.i2*m.i16 + 0.0251538*m.i2*m.i17 + 0.01321532*m.i2*m.i18 +
0.01127964*m.i2*m.i19 + 0.0096635*m.i2*m.i20 + 0.0160783*m.i2*m.i21 + 0.00271*m.i2*m.i22 +
0.01486022*m.i2*m.i23 + 0.01091018*m.i2*m.i24 + 0.01009426*m.i2*m.i25 + 0.00754144*m.i2*m.i26 +
0.01408844*m.i2*m.i27 + 0.00544162*m.i2*m.i28 + 0.01096178*m.i2*m.i29 + 0.00574964*m.i2*m.i30 +
0.00299428*m.i3*m.i4 + 0.001239314*m.i3*m.i5 + 0.01256412*m.i3*m.i6 + 0.00899714*m.i3*m.i7 +
0.00444448*m.i3*m.i8 + 0.00616612*m.i3*m.i9 + 0.0146019*m.i3*m.i10 + 0.01249836*m.i3*m.i11 +
0.0264968*m.i3*m.i12 + 0.01266506*m.i3*m.i13 + 0.01358566*m.i3*m.i14 + 0.01419766*m.i3*m.i15 +
0.01033796*m.i3*m.i16 + 0.040104*m.i3*m.i17 + 0.01504214*m.i3*m.i18 + 0.0210518*m.i3*m.i19 +
0.0169342*m.i3*m.i20 + 0.020394*m.i3*m.i21 + 0.006361*m.i3*m.i22 + 0.0173249*m.i3*m.i23 +
0.01157254*m.i3*m.i24 + 0.01601196*m.i3*m.i25 + 0.01305808*m.i3*m.i26 + 0.018918*m.i3*m.i27 +
0.0100768*m.i3*m.i28 + 0.01415258*m.i3*m.i29 + 0.00890208*m.i3*m.i30 + 0.00365082*m.i4*m.i5 +
0.0031533*m.i4*m.i6 + 0.001664882*m.i4*m.i7 + 0.000487746*m.i4*m.i8 + 0.00074873*m.i4*m.i9 +
0.00279536*m.i4*m.i10 + 0.000948078*m.i4*m.i11 + 0.00218644*m.i4*m.i12 + 0.001471884*m.i4*m.i13
+ 0.001764448*m.i4*m.i14 + 0.001707856*m.i4*m.i15 + 0.00415534*m.i4*m.i16 + 0.00552118*m.i4*
m.i17 + 0.00298928*m.i4*m.i18 + 0.000446818*m.i4*m.i19 + 0.0042709*m.i4*m.i20 + 0.00437068*m.i4*
m.i21 + 0.001584414*m.i4*m.i22 + 0.0028495*m.i4*m.i23 + 0.00550266*m.i4*m.i24 + 0.0019381*m.i4*
m.i25 - 0.000779792*m.i4*m.i26 + 0.00383714*m.i4*m.i27 + 0.00170793*m.i4*m.i28 + 0.00220852*m.i4*
m.i29 + 0.001897386*m.i4*m.i30 + 0.00226608*m.i5*m.i6 + 0.001391572*m.i5*m.i7 + 0.001434726*m.i5*
m.i8 + 0.000718962*m.i5*m.i9 + 0.00117417*m.i5*m.i10 + 0.001240914*m.i5*m.i11 + 0.000587866*m.i5*
m.i12 + 0.0020154*m.i5*m.i13 + 0.00126883*m.i5*m.i14 + 0.000645164*m.i5*m.i15 + 0.0001425196*m.i5
*m.i16 + 0.001199014*m.i5*m.i17 + 0.001896292*m.i5*m.i18 - 0.000289412*m.i5*m.i19 + 0.001457998*
m.i5*m.i20 + 0.00199702*m.i5*m.i21 + 0.001266598*m.i5*m.i22 + 0.000764624*m.i5*m.i23 +
0.001961312*m.i5*m.i24 + 0.001748826*m.i5*m.i25 - 0.00122625*m.i5*m.i26 + 0.000753266*m.i5*m.i27
+ 0.00063941*m.i5*m.i28 + 0.001644068*m.i5*m.i29 + 0.001587886*m.i5*m.i30 + 0.00454154*m.i6*m.i7
+ 0.001157686*m.i6*m.i8 + 0.0032018*m.i6*m.i9 + 0.00727798*m.i6*m.i10 + 0.0064553*m.i6*m.i11 +
0.00791618*m.i6*m.i12 + 0.00687526*m.i6*m.i13 + 0.00638032*m.i6*m.i14 + 0.00425538*m.i6*m.i15 +
0.00583332*m.i6*m.i16 + 0.01491304*m.i6*m.i17 + 0.00876772*m.i6*m.i18 + 0.00814434*m.i6*m.i19 +
0.00549208*m.i6*m.i20 + 0.0103848*m.i6*m.i21 + 0.001352278*m.i6*m.i22 + 0.0063097*m.i6*m.i23 +
0.0052012*m.i6*m.i24 + 0.00808494*m.i6*m.i25 + 0.00595234*m.i6*m.i26 + 0.00960786*m.i6*m.i27 +
0.0035648*m.i6*m.i28 + 0.00730486*m.i6*m.i29 + 0.0036145*m.i6*m.i30 + 0.0027426*m.i7*m.i8 +
0.00224138*m.i7*m.i9 + 0.00558948*m.i7*m.i10 + 0.00489378*m.i7*m.i11 + 0.0073565*m.i7*m.i12 +
0.0050794*m.i7*m.i13 + 0.00363244*m.i7*m.i14 + 0.00634576*m.i7*m.i15 + 0.001588982*m.i7*m.i16 +
0.00877926*m.i7*m.i17 + 0.00710862*m.i7*m.i18 + 0.00675396*m.i7*m.i19 + 0.00621206*m.i7*m.i20 +
0.00746652*m.i7*m.i21 + 0.001927036*m.i7*m.i22 + 0.00410122*m.i7*m.i23 + 0.00344774*m.i7*m.i24 +
0.00594546*m.i7*m.i25 + 0.00461784*m.i7*m.i26 + 0.00530234*m.i7*m.i27 + 0.00320122*m.i7*m.i28 +
0.00474356*m.i7*m.i29 + 0.00341222*m.i7*m.i30 + 0.00105347*m.i8*m.i9 + 0.001879822*m.i8*m.i10 +
0.00290244*m.i8*m.i11 + 0.00353818*m.i8*m.i12 + 0.0035513*m.i8*m.i13 + 0.00294406*m.i8*m.i14 +
0.00389942*m.i8*m.i15 + 0.00286866*m.i8*m.i16 + 0.000920126*m.i8*m.i17 + 0.00274282*m.i8*m.i18 +
0.0027675*m.i8*m.i19 + 0.00464592*m.i8*m.i20 + 0.001093444*m.i8*m.i21 + 0.000948594*m.i8*m.i22 +
0.00275316*m.i8*m.i23 + 0.001626794*m.i8*m.i24 + 0.00209498*m.i8*m.i25 + 0.0031962*m.i8*m.i26 +
0.001767658*m.i8*m.i27 + 0.00109948*m.i8*m.i28 + 0.00292004*m.i8*m.i29 + 0.00215496*m.i8*m.i30 +
0.00329222*m.i9*m.i10 + 0.00239978*m.i9*m.i11 + 0.00365066*m.i9*m.i12 + 0.00463422*m.i9*m.i13 +
0.00260888*m.i9*m.i14 + 0.00330432*m.i9*m.i15 + 0.000950274*m.i9*m.i16 + 0.00309664*m.i9*m.i17 +
0.00325462*m.i9*m.i18 + 0.00494078*m.i9*m.i19 + 0.00339202*m.i9*m.i20 + 0.00283784*m.i9*m.i21 +
0.001862472*m.i9*m.i22 + 0.001457294*m.i9*m.i23 + 0.000292408*m.i9*m.i24 + 0.00434258*m.i9*m.i25
+ 0.0051917*m.i9*m.i26 + 0.00442724*m.i9*m.i27 + 0.00235362*m.i9*m.i28 + 0.0023207*m.i9*m.i29 +
0.00232972*m.i9*m.i30 + 0.00661128*m.i10*m.i11 + 0.0099349*m.i10*m.i12 + 0.00670728*m.i10*m.i13
+ 0.00688756*m.i10*m.i14 + 0.00814804*m.i10*m.i15 + 0.00387536*m.i10*m.i16 + 0.01709622*m.i10*
m.i17 + 0.00921546*m.i10*m.i18 + 0.01138012*m.i10*m.i19 + 0.0073598*m.i10*m.i20 + 0.012047*m.i10*
m.i21 + 0.001953884*m.i10*m.i22 + 0.01110682*m.i10*m.i23 + 0.00744232*m.i10*m.i24 + 0.00846572*
m.i10*m.i25 + 0.00811902*m.i10*m.i26 + 0.01093528*m.i10*m.i27 + 0.00642736*m.i10*m.i28 +
0.00817838*m.i10*m.i29 + 0.00467066*m.i10*m.i30 + 0.01089978*m.i11*m.i12 + 0.00580646*m.i11*m.i13
+ 0.00479126*m.i11*m.i14 + 0.00655088*m.i11*m.i15 + 0.00784072*m.i11*m.i16 + 0.0171429*m.i11*
m.i17 + 0.0099023*m.i11*m.i18 + 0.00881158*m.i11*m.i19 + 0.0065332*m.i11*m.i20 + 0.01111462*m.i11
*m.i21 + 0.00238226*m.i11*m.i22 + 0.00942038*m.i11*m.i23 + 0.00509366*m.i11*m.i24 + 0.0079177*
m.i11*m.i25 + 0.00653764*m.i11*m.i26 + 0.00963386*m.i11*m.i27 + 0.00518254*m.i11*m.i28 +
0.00839924*m.i11*m.i29 + 0.00396162*m.i11*m.i30 + 0.00812884*m.i12*m.i13 + 0.00932748*m.i12*m.i14
+ 0.01172114*m.i12*m.i15 + 0.00937084*m.i12*m.i16 + 0.033621*m.i12*m.i17 + 0.0125625*m.i12*m.i18
+ 0.01635358*m.i12*m.i19 + 0.01460644*m.i12*m.i20 + 0.01374474*m.i12*m.i21 + 0.00526496*m.i12*
m.i22 + 0.01402198*m.i12*m.i23 + 0.00931776*m.i12*m.i24 + 0.01195866*m.i12*m.i25 + 0.00822682*
m.i12*m.i26 + 0.01241788*m.i12*m.i27 + 0.00706034*m.i12*m.i28 + 0.01219462*m.i12*m.i29 +
0.00598988*m.i12*m.i30 + 0.0068538*m.i13*m.i14 + 0.00620178*m.i13*m.i15 + 0.00379406*m.i13*m.i16
+ 0.00889862*m.i13*m.i17 + 0.00816594*m.i13*m.i18 + 0.01033824*m.i13*m.i19 + 0.00577162*m.i13*
m.i20 + 0.00736548*m.i13*m.i21 + 0.00410776*m.i13*m.i22 + 0.00580558*m.i13*m.i23 + 0.00459074*
m.i13*m.i24 + 0.0072167*m.i13*m.i25 + 0.00956086*m.i13*m.i26 + 0.00943468*m.i13*m.i27 +
0.00587164*m.i13*m.i28 + 0.00902842*m.i13*m.i29 + 0.00550608*m.i13*m.i30 + 0.00635356*m.i14*m.i15
+ 0.00709628*m.i14*m.i16 + 0.01555038*m.i14*m.i17 + 0.00826722*m.i14*m.i18 + 0.00751614*m.i14*
m.i19 + 0.00814342*m.i14*m.i20 + 0.00995652*m.i14*m.i21 + 0.00477798*m.i14*m.i22 + 0.0076843*
m.i14*m.i23 + 0.00817698*m.i14*m.i24 + 0.00886056*m.i14*m.i25 + 0.00579636*m.i14*m.i26 +
0.01128084*m.i14*m.i27 + 0.00483444*m.i14*m.i28 + 0.0068342*m.i14*m.i29 + 0.0077372*m.i14*m.i30
+ 0.00973548*m.i15*m.i16 + 0.01556958*m.i15*m.i17 + 0.00926266*m.i15*m.i18 + 0.01281188*m.i15*
m.i19 + 0.00669072*m.i15*m.i20 + 0.00937684*m.i15*m.i21 + 0.00639856*m.i15*m.i22 + 0.00611934*
m.i15*m.i23 + 0.00853942*m.i15*m.i24 + 0.00964296*m.i15*m.i25 + 0.00704584*m.i15*m.i26 +
0.0119279*m.i15*m.i27 + 0.00648174*m.i15*m.i28 + 0.01050128*m.i15*m.i29 + 0.00502696*m.i15*m.i30
+ 0.01809222*m.i16*m.i17 + 0.00823288*m.i16*m.i18 + 0.01161214*m.i16*m.i19 + 0.00533676*m.i16*
m.i20 + 0.01233794*m.i16*m.i21 + 0.00512778*m.i16*m.i22 + 0.00722276*m.i16*m.i23 + 0.01715638*
m.i16*m.i24 + 0.00677738*m.i16*m.i25 + 0.0069565*m.i16*m.i26 + 0.01691522*m.i16*m.i27 +
0.00246824*m.i16*m.i28 + 0.00934088*m.i16*m.i29 + 0.00393866*m.i16*m.i30 + 0.01858542*m.i17*m.i18
+ 0.0224912*m.i17*m.i19 + 0.01793624*m.i17*m.i20 + 0.0270204*m.i17*m.i21 + 0.01083832*m.i17*
m.i22 + 0.0216678*m.i17*m.i23 + 0.0183347*m.i17*m.i24 + 0.01893*m.i17*m.i25 + 0.01089098*m.i17*
m.i26 + 0.0209142*m.i17*m.i27 + 0.01273162*m.i17*m.i28 + 0.0200902*m.i17*m.i29 + 0.00774366*m.i17
*m.i30 + 0.01171594*m.i18*m.i19 + 0.00861454*m.i18*m.i20 + 0.01414322*m.i18*m.i21 + 0.001961404*
m.i18*m.i22 + 0.00910214*m.i18*m.i23 + 0.01003468*m.i18*m.i24 + 0.0094743*m.i18*m.i25 +
0.00825794*m.i18*m.i26 + 0.01336058*m.i18*m.i27 + 0.00607998*m.i18*m.i28 + 0.01070732*m.i18*m.i29
+ 0.00492858*m.i18*m.i30 + 0.0082848*m.i19*m.i20 + 0.0126004*m.i19*m.i21 + 0.00407366*m.i19*
m.i22 + 0.01381284*m.i19*m.i23 + 0.00838908*m.i19*m.i24 + 0.01198264*m.i19*m.i25 + 0.01583126*
m.i19*m.i26 + 0.01664044*m.i19*m.i27 + 0.00924324*m.i19*m.i28 + 0.01214842*m.i19*m.i29 +
0.00592778*m.i19*m.i30 + 0.01071434*m.i20*m.i21 + 0.00296964*m.i20*m.i22 + 0.00736528*m.i20*m.i23
+ 0.00606396*m.i20*m.i24 + 0.00628822*m.i20*m.i25 + 0.00817696*m.i20*m.i26 + 0.00776894*m.i20*
m.i27 + 0.0026202*m.i20*m.i28 + 0.00717342*m.i20*m.i29 + 0.00579184*m.i20*m.i30 + 0.00469936*
m.i21*m.i22 + 0.0138599*m.i21*m.i23 + 0.0125037*m.i21*m.i24 + 0.01211002*m.i21*m.i25 + 0.00836436
*m.i21*m.i26 + 0.016494*m.i21*m.i27 + 0.00602872*m.i21*m.i28 + 0.01180462*m.i21*m.i29 +
0.00570478*m.i21*m.i30 + 0.0032176*m.i22*m.i23 + 0.00379112*m.i22*m.i24 + 0.00301976*m.i22*m.i25
+ 0.00308424*m.i22*m.i26 + 0.00369962*m.i22*m.i27 + 0.00278784*m.i22*m.i28 + 0.00465846*m.i22*
m.i29 + 0.00297212*m.i22*m.i30 + 0.01019176*m.i23*m.i24 + 0.00779098*m.i23*m.i25 + 0.00577776*
m.i23*m.i26 + 0.01267514*m.i23*m.i27 + 0.00735432*m.i23*m.i28 + 0.00786386*m.i23*m.i29 +
0.00559972*m.i23*m.i30 + 0.00725022*m.i24*m.i25 + 0.00455648*m.i24*m.i26 + 0.0157223*m.i24*m.i27
+ 0.00579512*m.i24*m.i28 + 0.00792398*m.i24*m.i29 + 0.0045755*m.i24*m.i30 + 0.00723442*m.i25*
m.i26 + 0.01196012*m.i25*m.i27 + 0.0063273*m.i25*m.i28 + 0.0099815*m.i25*m.i29 + 0.0041794*m.i25*
m.i30 + 0.01139894*m.i26*m.i27 + 0.0080092*m.i26*m.i28 + 0.0080044*m.i26*m.i29 + 0.00493602*m.i26
*m.i30 + 0.00826208*m.i27*m.i28 + 0.01246152*m.i27*m.i29 + 0.0067556*m.i27*m.i30 + 0.00575648*
m.i28*m.i29 + 0.0044929*m.i28*m.i30 + 0.00469952*m.i29*m.i30 - m.x31 <= 0)
m.c2 = Constraint(expr= 0.00418236*m.i1 + 0.00084825*m.i2 - 0.00595114*m.i3 + 0.013626*m.i4 + 0.0104202*m.i5
+ 0.00696825*m.i6 + 0.00454677*m.i7 - 0.00034318*m.i8 - 0.00570481*m.i9 - 0.00023195*m.i10
+ 0.00048701*m.i11 - 0.00248068*m.i12 + 0.00586372*m.i13 - 0.00923462*m.i14 - 0.00111219*m.i15
+ 0.00557072*m.i16 - 0.00728342*m.i17 - 0.00068175*m.i18 - 0.00431856*m.i19 - 0.00011388*m.i20
+ 0.0152599*m.i21 + 0.00645572*m.i22 + 0.0087442*m.i23 + 0.0205808*m.i24 + 0.00360132*m.i25
- 0.00424596*m.i26 + 0.00406*m.i27 + 0.00429918*m.i28 + 0.00365023*m.i29 + 0.0055383*m.i30 >= 0)
m.c3 = Constraint(expr= 39.19*m.i1 + 41.47*m.i2 + 5.71*m.i3 + 53.59*m.i4 + 43.65*m.i5 + 85.46*m.i6 + 39.7*m.i7
+ 44.91*m.i8 + 9.6*m.i9 + 11.26*m.i10 + 39.56*m.i11 + 46*m.i12 + 45.25*m.i13 + 21.9*m.i14
+ 11.85*m.i15 + 37.4*m.i16 + 4.75*m.i17 + 44.44*m.i18 + 80.5*m.i19 + 49.46*m.i20 + 67.02*m.i21
+ 59.25*m.i22 + 71.5*m.i23 + 48.8*m.i24 + 73.22*m.i25 + 101.9*m.i26 + 20.06*m.i27 + 36.33*m.i28
+ 41.31*m.i29 + 53.09*m.i30 >= 2000)
m.c4 = Constraint(expr= 39.19*m.i1 + 41.47*m.i2 + 5.71*m.i3 + 53.59*m.i4 + 43.65*m.i5 + 85.46*m.i6 + 39.7*m.i7
+ 44.91*m.i8 + 9.6*m.i9 + 11.26*m.i10 + 39.56*m.i11 + 46*m.i12 + 45.25*m.i13 + 21.9*m.i14
+ 11.85*m.i15 + 37.4*m.i16 + 4.75*m.i17 + 44.44*m.i18 + 80.5*m.i19 + 49.46*m.i20 + 67.02*m.i21
+ 59.25*m.i22 + 71.5*m.i23 + 48.8*m.i24 + 73.22*m.i25 + 101.9*m.i26 + 20.06*m.i27 + 36.33*m.i28
+ 41.31*m.i29 + 53.09*m.i30 <= 2200)
| [
"[email protected]"
]
| |
f1b2aa99e2b8319ca529ea567e0c1e9cb8a1641d | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/quickFixes/PyMoveAttributeToInitQuickFixTest/skipDocstring.py | a8ba90afa2a9d2800cf229b2f9006bacf32f6e57 | [
"Apache-2.0"
]
| permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 114 | py | __author__ = 'ktisha'
class A:
"""
class docstring
"""
def foo(self):
self.<caret>b = 1 | [
"[email protected]"
]
| |
0f1051fd72184d9077c9d9897e77a5275a0a20c4 | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python2/10_Modules/user_defined_modules/__init__.py | 7fdaa3a68f66c2f95f16c3fee4fa12b8cd44c24e | []
| no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | import fibscript
import newScript | [
"[email protected]"
]
| |
b168f4b1683a0a19436abc505deb186004b85a6c | 94dcccac732db506a1066a83961554842ca50d0b | /weather/urls.py | 55c116563a1e827708236748f015e45215bbb1d8 | []
| no_license | PickertJoe/personal_website | 69e7d2d6c1065686fe917c7af0e836b193cab612 | 3c61e2847fd375eb99fcb23b2c9ad69ca73b1cf3 | refs/heads/master | 2022-12-13T08:10:53.177963 | 2022-03-05T01:59:43 | 2022-03-05T01:59:43 | 191,227,416 | 0 | 0 | null | 2022-11-22T04:45:56 | 2019-06-10T18:51:07 | Python | UTF-8 | Python | false | false | 201 | py | from django.urls import path
from . import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('', views.weather_index, name='weather_index'),
]
| [
"[email protected]"
]
| |
fb2457b8cd16c4da1386acae87a20181fa49bac1 | 505cd95efe18fc92eb82ce0a72ac219b1ac5b19c | /uiExtension.py | 3d40e34416fd839ef392fdb207bbd3e98acc9be3 | []
| no_license | alexsilva/joomla-tools | 169523fa1efe5f531bd5d08f27b67f2547b171ee | 0b971618cee6b7784b47ab55f78df24329bf04c2 | refs/heads/master | 2021-01-10T14:24:43.391722 | 2020-06-18T17:41:20 | 2020-06-18T17:41:20 | 8,543,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,465 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'extension.ui'
#
# Created: Sat Mar 30 17:31:35 2013
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1155, 742)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("media/joomla_logo_black.bmp"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtGui.QGroupBox(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Segoe Script")
font.setPointSize(8)
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.frame_3 = QtGui.QFrame(self.groupBox)
self.frame_3.setFrameShape(QtGui.QFrame.Box)
self.frame_3.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_3.setObjectName("frame_3")
self.horizontalLayout = QtGui.QHBoxLayout(self.frame_3)
self.horizontalLayout.setObjectName("horizontalLayout")
self.joomlaChoosePath = QtGui.QToolButton(self.frame_3)
font = QtGui.QFont()
font.setFamily("MV Boli")
self.joomlaChoosePath.setFont(font)
self.joomlaChoosePath.setObjectName("joomlaChoosePath")
self.horizontalLayout.addWidget(self.joomlaChoosePath)
self.joomlaPath = QtGui.QLineEdit(self.frame_3)
font = QtGui.QFont()
font.setFamily("Arial")
self.joomlaPath.setFont(font)
self.joomlaPath.setReadOnly(True)
self.joomlaPath.setObjectName("joomlaPath")
self.horizontalLayout.addWidget(self.joomlaPath)
self.verticalLayout_4.addWidget(self.frame_3)
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Segoe Script")
font.setPointSize(8)
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName("groupBox_2")
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.groupBox_2)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.frame_4 = QtGui.QFrame(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_4.sizePolicy().hasHeightForWidth())
self.frame_4.setSizePolicy(sizePolicy)
self.frame_4.setFrameShape(QtGui.QFrame.Box)
self.frame_4.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_4.setObjectName("frame_4")
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.frame_4)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label = QtGui.QLabel(self.frame_4)
font = QtGui.QFont()
font.setFamily("Segoe Script")
self.label.setFont(font)
self.label.setObjectName("label")
self.horizontalLayout_4.addWidget(self.label)
self.componentName = QtGui.QLineEdit(self.frame_4)
font = QtGui.QFont()
font.setFamily("Arial")
self.componentName.setFont(font)
self.componentName.setObjectName("componentName")
self.horizontalLayout_4.addWidget(self.componentName)
self.horizontalLayout_7.addWidget(self.frame_4)
self.frame_2 = QtGui.QFrame(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(3)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_2.sizePolicy().hasHeightForWidth())
self.frame_2.setSizePolicy(sizePolicy)
self.frame_2.setFrameShape(QtGui.QFrame.Box)
self.frame_2.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_2.setObjectName("frame_2")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.frame_2)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.componentChoosePath = QtGui.QToolButton(self.frame_2)
font = QtGui.QFont()
font.setFamily("MV Boli")
self.componentChoosePath.setFont(font)
self.componentChoosePath.setObjectName("componentChoosePath")
self.horizontalLayout_2.addWidget(self.componentChoosePath)
self.componentPath = QtGui.QLineEdit(self.frame_2)
font = QtGui.QFont()
font.setFamily("Arial")
self.componentPath.setFont(font)
self.componentPath.setReadOnly(True)
self.componentPath.setObjectName("componentPath")
self.horizontalLayout_2.addWidget(self.componentPath)
self.componetZip = QtGui.QPushButton(self.frame_2)
self.componetZip.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("media/zip.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.componetZip.setIcon(icon1)
self.componetZip.setObjectName("componetZip")
self.horizontalLayout_2.addWidget(self.componetZip)
self.horizontalLayout_7.addWidget(self.frame_2)
self.verticalLayout.addWidget(self.groupBox_2)
self.groupBox_3 = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Segoe Script")
font.setPointSize(8)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName("groupBox_3")
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.groupBox_3)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.frame_5 = QtGui.QFrame(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_5.sizePolicy().hasHeightForWidth())
self.frame_5.setSizePolicy(sizePolicy)
self.frame_5.setFrameShape(QtGui.QFrame.Box)
self.frame_5.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_5.setObjectName("frame_5")
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.frame_5)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_2 = QtGui.QLabel(self.frame_5)
font = QtGui.QFont()
font.setFamily("Segoe Print")
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.horizontalLayout_5.addWidget(self.label_2)
self.pluginName = QtGui.QLineEdit(self.frame_5)
font = QtGui.QFont()
font.setFamily("Arial")
self.pluginName.setFont(font)
self.pluginName.setObjectName("pluginName")
self.horizontalLayout_5.addWidget(self.pluginName)
self.horizontalLayout_8.addWidget(self.frame_5)
self.frame = QtGui.QFrame(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(3)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setFrameShape(QtGui.QFrame.Box)
self.frame.setFrameShadow(QtGui.QFrame.Sunken)
self.frame.setObjectName("frame")
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.frame)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.pluginChoosePath = QtGui.QToolButton(self.frame)
font = QtGui.QFont()
font.setFamily("MV Boli")
self.pluginChoosePath.setFont(font)
self.pluginChoosePath.setObjectName("pluginChoosePath")
self.horizontalLayout_3.addWidget(self.pluginChoosePath)
self.pluginPath = QtGui.QLineEdit(self.frame)
font = QtGui.QFont()
font.setFamily("Arial")
self.pluginPath.setFont(font)
self.pluginPath.setReadOnly(True)
self.pluginPath.setObjectName("pluginPath")
self.horizontalLayout_3.addWidget(self.pluginPath)
self.pluginZip = QtGui.QPushButton(self.frame)
self.pluginZip.setText("")
self.pluginZip.setIcon(icon1)
self.pluginZip.setObjectName("pluginZip")
self.horizontalLayout_3.addWidget(self.pluginZip)
self.horizontalLayout_8.addWidget(self.frame)
self.verticalLayout.addWidget(self.groupBox_3)
self.groupBox_6 = QtGui.QGroupBox(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Segoe Script")
self.groupBox_6.setFont(font)
self.groupBox_6.setObjectName("groupBox_6")
self.horizontalLayout_12 = QtGui.QHBoxLayout(self.groupBox_6)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.frame_9 = QtGui.QFrame(self.groupBox_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_9.sizePolicy().hasHeightForWidth())
self.frame_9.setSizePolicy(sizePolicy)
self.frame_9.setFrameShape(QtGui.QFrame.Box)
self.frame_9.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_9.setObjectName("frame_9")
self.horizontalLayout_13 = QtGui.QHBoxLayout(self.frame_9)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.label_5 = QtGui.QLabel(self.frame_9)
self.label_5.setObjectName("label_5")
self.horizontalLayout_13.addWidget(self.label_5)
self.moduleName = QtGui.QLineEdit(self.frame_9)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(50)
font.setBold(False)
self.moduleName.setFont(font)
self.moduleName.setObjectName("moduleName")
self.horizontalLayout_13.addWidget(self.moduleName)
self.horizontalLayout_12.addWidget(self.frame_9)
self.frame_10 = QtGui.QFrame(self.groupBox_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(3)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_10.sizePolicy().hasHeightForWidth())
self.frame_10.setSizePolicy(sizePolicy)
self.frame_10.setFrameShape(QtGui.QFrame.Box)
self.frame_10.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_10.setObjectName("frame_10")
self.horizontalLayout_14 = QtGui.QHBoxLayout(self.frame_10)
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.moduleChoosePath = QtGui.QToolButton(self.frame_10)
font = QtGui.QFont()
font.setFamily("MV Boli")
self.moduleChoosePath.setFont(font)
self.moduleChoosePath.setObjectName("moduleChoosePath")
self.horizontalLayout_14.addWidget(self.moduleChoosePath)
self.modulePath = QtGui.QLineEdit(self.frame_10)
font = QtGui.QFont()
font.setFamily("Arial")
self.modulePath.setFont(font)
self.modulePath.setReadOnly(True)
self.modulePath.setObjectName("modulePath")
self.horizontalLayout_14.addWidget(self.modulePath)
self.moduleZip = QtGui.QPushButton(self.frame_10)
self.moduleZip.setText("")
self.moduleZip.setIcon(icon1)
self.moduleZip.setObjectName("moduleZip")
self.horizontalLayout_14.addWidget(self.moduleZip)
self.horizontalLayout_12.addWidget(self.frame_10)
self.verticalLayout.addWidget(self.groupBox_6)
self.groupBox_5 = QtGui.QGroupBox(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Segoe Script")
self.groupBox_5.setFont(font)
self.groupBox_5.setObjectName("groupBox_5")
self.horizontalLayout_10 = QtGui.QHBoxLayout(self.groupBox_5)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.frame_7 = QtGui.QFrame(self.groupBox_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_7.sizePolicy().hasHeightForWidth())
self.frame_7.setSizePolicy(sizePolicy)
self.frame_7.setFrameShape(QtGui.QFrame.Box)
self.frame_7.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_7.setObjectName("frame_7")
self.horizontalLayout_9 = QtGui.QHBoxLayout(self.frame_7)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.label_3 = QtGui.QLabel(self.frame_7)
font = QtGui.QFont()
font.setFamily("Segoe Print")
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.horizontalLayout_9.addWidget(self.label_3)
self.rateCheck = QtGui.QDoubleSpinBox(self.frame_7)
self.rateCheck.setMaximum(300.0)
self.rateCheck.setSingleStep(0.5)
self.rateCheck.setProperty("value", 1.0)
self.rateCheck.setObjectName("rateCheck")
self.horizontalLayout_9.addWidget(self.rateCheck)
self.horizontalLayout_10.addWidget(self.frame_7)
self.frame_6 = QtGui.QFrame(self.groupBox_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_6.sizePolicy().hasHeightForWidth())
self.frame_6.setSizePolicy(sizePolicy)
self.frame_6.setFrameShape(QtGui.QFrame.Box)
self.frame_6.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_6.setObjectName("frame_6")
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.frame_6)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_4 = QtGui.QLabel(self.frame_6)
self.label_4.setObjectName("label_4")
self.horizontalLayout_6.addWidget(self.label_4)
self.scanFilesRate = QtGui.QDoubleSpinBox(self.frame_6)
self.scanFilesRate.setMinimum(10.0)
self.scanFilesRate.setMaximum(1800.0)
self.scanFilesRate.setSingleStep(5.0)
self.scanFilesRate.setObjectName("scanFilesRate")
self.horizontalLayout_6.addWidget(self.scanFilesRate)
self.scanFilesNow = QtGui.QPushButton(self.frame_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scanFilesNow.sizePolicy().hasHeightForWidth())
self.scanFilesNow.setSizePolicy(sizePolicy)
self.scanFilesNow.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("media/refresh.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.scanFilesNow.setIcon(icon2)
self.scanFilesNow.setObjectName("scanFilesNow")
self.horizontalLayout_6.addWidget(self.scanFilesNow)
self.horizontalLayout_10.addWidget(self.frame_6)
self.frame_8 = QtGui.QFrame(self.groupBox_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_8.sizePolicy().hasHeightForWidth())
self.frame_8.setSizePolicy(sizePolicy)
self.frame_8.setFrameShape(QtGui.QFrame.Box)
self.frame_8.setFrameShadow(QtGui.QFrame.Sunken)
self.frame_8.setObjectName("frame_8")
self.horizontalLayout_11 = QtGui.QHBoxLayout(self.frame_8)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.runningInfo = QtGui.QLabel(self.frame_8)
self.runningInfo.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.runningInfo.sizePolicy().hasHeightForWidth())
self.runningInfo.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("AR DELANEY")
font.setPointSize(10)
font.setItalic(True)
self.runningInfo.setFont(font)
self.runningInfo.setAutoFillBackground(False)
self.runningInfo.setStyleSheet("color: rgb(0, 0, 255);")
self.runningInfo.setAlignment(QtCore.Qt.AlignCenter)
self.runningInfo.setObjectName("runningInfo")
self.horizontalLayout_11.addWidget(self.runningInfo)
self.stoppedInfo = QtGui.QLabel(self.frame_8)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stoppedInfo.sizePolicy().hasHeightForWidth())
self.stoppedInfo.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("AR DELANEY")
font.setPointSize(10)
font.setItalic(True)
self.stoppedInfo.setFont(font)
self.stoppedInfo.setStyleSheet("color: rgb(170, 0, 0);")
self.stoppedInfo.setAlignment(QtCore.Qt.AlignCenter)
self.stoppedInfo.setObjectName("stoppedInfo")
self.horizontalLayout_11.addWidget(self.stoppedInfo)
self.btnRun = QtGui.QPushButton(self.frame_8)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnRun.sizePolicy().hasHeightForWidth())
self.btnRun.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Segoe Print")
font.setPointSize(10)
self.btnRun.setFont(font)
self.btnRun.setCheckable(True)
self.btnRun.setObjectName("btnRun")
self.horizontalLayout_11.addWidget(self.btnRun)
self.horizontalLayout_10.addWidget(self.frame_8)
self.verticalLayout.addWidget(self.groupBox_5)
self.groupBox_4 = QtGui.QGroupBox(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Segoe Script")
self.groupBox_4.setFont(font)
self.groupBox_4.setObjectName("groupBox_4")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.groupBox_4)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.eventLog = QtGui.QPlainTextEdit(self.groupBox_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.eventLog.sizePolicy().hasHeightForWidth())
self.eventLog.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
self.eventLog.setFont(font)
self.eventLog.setReadOnly(True)
self.eventLog.setPlainText("")
self.eventLog.setObjectName("eventLog")
self.verticalLayout_5.addWidget(self.eventLog)
self.verticalLayout.addWidget(self.groupBox_4)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1155, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.btnRun, QtCore.SIGNAL("toggled(bool)"), self.stoppedInfo.setHidden)
QtCore.QObject.connect(self.btnRun, QtCore.SIGNAL("toggled(bool)"), self.runningInfo.setVisible)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.pluginPath, self.componentChoosePath)
MainWindow.setTabOrder(self.componentChoosePath, self.componentPath)
MainWindow.setTabOrder(self.componentPath, self.pluginChoosePath)
MainWindow.setTabOrder(self.pluginChoosePath, self.joomlaPath)
MainWindow.setTabOrder(self.joomlaPath, self.joomlaChoosePath)
MainWindow.setTabOrder(self.joomlaChoosePath, self.componentName)
MainWindow.setTabOrder(self.componentName, self.pluginName)
MainWindow.setTabOrder(self.pluginName, self.eventLog)
MainWindow.setTabOrder(self.eventLog, self.btnRun)
MainWindow.setTabOrder(self.btnRun, self.rateCheck)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Joomla - AutoUp", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("MainWindow", "Joomla", None, QtGui.QApplication.UnicodeUTF8))
self.joomlaChoosePath.setText(QtGui.QApplication.translate("MainWindow", "Choose Path", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("MainWindow", "Component", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Name", None, QtGui.QApplication.UnicodeUTF8))
self.componentChoosePath.setText(QtGui.QApplication.translate("MainWindow", "Choose Path", None, QtGui.QApplication.UnicodeUTF8))
self.componetZip.setToolTip(QtGui.QApplication.translate("MainWindow", "Compreess all content in zip.", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("MainWindow", "Plugin", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Name", None, QtGui.QApplication.UnicodeUTF8))
self.pluginChoosePath.setText(QtGui.QApplication.translate("MainWindow", "Choose Path", None, QtGui.QApplication.UnicodeUTF8))
self.pluginZip.setToolTip(QtGui.QApplication.translate("MainWindow", "Compreess all content in zip.", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_6.setTitle(QtGui.QApplication.translate("MainWindow", "Module", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("MainWindow", "Name", None, QtGui.QApplication.UnicodeUTF8))
self.moduleChoosePath.setText(QtGui.QApplication.translate("MainWindow", "Choose Path", None, QtGui.QApplication.UnicodeUTF8))
self.moduleZip.setToolTip(QtGui.QApplication.translate("MainWindow", "Compreess all content in zip.", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_5.setTitle(QtGui.QApplication.translate("MainWindow", "Configuration", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Rate check", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "Scan Files", None, QtGui.QApplication.UnicodeUTF8))
self.scanFilesNow.setToolTip(QtGui.QApplication.translate("MainWindow", "Rescan all files.", None, QtGui.QApplication.UnicodeUTF8))
self.runningInfo.setText(QtGui.QApplication.translate("MainWindow", "RUNNING", None, QtGui.QApplication.UnicodeUTF8))
self.stoppedInfo.setText(QtGui.QApplication.translate("MainWindow", "STOPPED", None, QtGui.QApplication.UnicodeUTF8))
self.btnRun.setText(QtGui.QApplication.translate("MainWindow", "execute", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_4.setTitle(QtGui.QApplication.translate("MainWindow", "Logs", None, QtGui.QApplication.UnicodeUTF8))
| [
"[email protected]"
]
| |
90df7c2a08db8828c6b83c481efe68e934480b6b | 9f09ecfed34f5014116a1c7afadec2b9c07e9971 | /example_project/some_modules/third_modules/a113.py | 8921acb9eca7563c52dc505778aa4e20402b2fed | [
"MIT"
]
| permissive | Yuriy-Leonov/cython_imports_limit_issue | a04ce73e8c750f3a61d7aaacaf58665273bf4a49 | 2f9e7c02798fb52185dabfe6ce3811c439ca2839 | refs/heads/master | 2020-09-11T23:57:56.677138 | 2019-11-18T17:48:50 | 2019-11-18T17:51:07 | 222,232,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | class A113:
pass
| [
"[email protected]"
]
| |
c8843c93853bb44731b78258d1617cea61e2fa91 | d998c5fbbd02c1666862b8996639b8f34604a6eb | /dirigible/sheet/importer.py | 49899c0fae53173c5b9e16e783f59ea4e2ecb81a | [
"MIT"
]
| permissive | randfb/dirigible-spreadsheet | 668f8f341fd48add69937d9dc41353360ed4f08c | c771e9a391708f3b219248bf9974e05b1582fdd0 | refs/heads/master | 2020-04-14T02:49:44.123713 | 2017-04-13T06:51:44 | 2017-04-13T06:51:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from chardet.universaldetector import UniversalDetector
from codecs import getreader
import csv
from xlrd import (
error_text_from_code, xldate_as_tuple, XL_CELL_DATE, XL_CELL_ERROR,
)
from worksheet import Worksheet
class DirigibleImportError(Exception):
pass
def worksheet_from_csv(
worksheet, csv_file, start_column, start_row, excel_encoding
):
def autodetect_encoding(csv_file):
detector = UniversalDetector()
for line in csv_file.readlines():
detector.feed(line)
if detector.done: break
detector.close()
csv_file.seek(0)
encoding = detector.result['encoding']
if not encoding:
raise DirigibleImportError('could not recognise encoding')
return encoding
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
if excel_encoding:
encoding = 'windows-1252'
else:
encoding = autodetect_encoding(csv_file)
unicode_translated_csv_file = getreader(encoding)(csv_file)
row = start_row
try:
for csv_row in unicode_csv_reader(unicode_translated_csv_file):
column = start_column
for csv_cell in csv_row:
worksheet[column, row].formula = unicode(csv_cell)
column += 1
row += 1
except Exception, e:
raise DirigibleImportError(unicode(e))
return worksheet
def worksheet_from_excel(excel_sheet):
worksheet = Worksheet()
for col in range(excel_sheet.ncols):
for row in range(excel_sheet.nrows):
cell = excel_sheet.cell(row, col)
if cell.ctype == XL_CELL_ERROR:
formula = '=%s' % (error_text_from_code[cell.value], )
elif cell.ctype == XL_CELL_DATE:
formula = '=DateTime(%s, %s, %s, %s, %s, %s)' % xldate_as_tuple(
cell.value, excel_sheet.book.datemode)
else:
formula = unicode(excel_sheet.cell(row, col).value)
worksheet[col + 1, row + 1].formula = formula
return worksheet
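# Illustrative usage sketch (assumes an xlrd workbook opened by the caller):
#   book = xlrd.open_workbook(path)
#   worksheets = [worksheet_from_excel(book.sheet_by_index(i))
#                 for i in range(book.nsheets)]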
| [
"[email protected]"
]
| |
553dfa692cedff65a719f24eedf7b1caa123174e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02780/s249015131.py | df3ee8200242ba2c41976713fe7df8440c0aa10b | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | n,k = map(int, input().split())
p = [int(x) for x in input().split()]
p_kitaiti=[float((x+1)/2) for x in p]
ruisekiwa=[p_kitaiti[0]]
for i in range(1,len(p_kitaiti)):
ruisekiwa.append(ruisekiwa[-1]+p_kitaiti[i])
ans=ruisekiwa[k-1]
for i in range(1,len(ruisekiwa)-k+1):
ans=max(ans,ruisekiwa[i+k-1]-ruisekiwa[i-1])
print(ans) | [
"[email protected]"
]
| |
58ec6e180cfe9269f7d7743814d631827370228b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/5463.py | 9161570ab3d441e8b67d8aabb2838ba513463229 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | test = int(input())
def isTidy(num):
last = 10 ** 19
while(num > 0):
currD = num % 10
if currD > last:
return False
else:
last = currD
num /= 10
num = int(num)
return True
def lastTidy(max):
lastT = -1
for i in range(1, max + 1):
if isTidy(i):
lastT = i
return lastT
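# Brute force: lastTidy checks every value from 1 to N and keeps the largest tidy
# number, which is O(N * digits) and only viable for the small limits assumed here.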
case = 1
while(test > 0):
currN = int(input())
print("Case #{}: {}".format(case, lastTidy(currN)))
case += 1
test -= 1
| [
"[email protected]"
]
| |
fd7393b541b9bb5f909a07532784680f760067d5 | c88a3a539d32a8c0c3426508d85bcead51e28273 | /average_solve_cartpole_v1_DQN.py | cc1d11dbc7d2b3b7893c0d1df3d81686249ce6c8 | []
| no_license | afcarl/DQN_Experiments | f6f4e35190eafa5add1ccb4b6487994af49368cd | 667e072a76bf3702556ae44fe6b4a9ebec143fbd | refs/heads/master | 2020-03-22T08:06:39.490033 | 2017-02-22T03:49:22 | 2017-02-22T03:49:22 | 139,745,429 | 1 | 0 | null | 2018-07-04T16:31:59 | 2018-07-04T16:31:59 | null | UTF-8 | Python | false | false | 6,214 | py | import copy
import gym
from gym import wrappers
import matplotlib.pyplot as plt
import time
from utils import *
from ReplayMemory import ReplayMemory
from agents import AgentEpsGreedy
from valuefunctions import ValueFunctionDQN
# Inspired by necnec's algorithm at:
# https://gym.openai.com/evaluations/eval_89nQ59Y4SbmrlQ0P9pufiA
# And inspired by David Silver's Deep RL tutorial:
# http://www0.cs.ucl.ac.uk/staff/d.silver/web/Resources_files/deep_rl.pdf
# results_dir_prefix = '###'
# upload = False
discount = 0.99
decay_eps = 0.9
batch_size = 64
max_n_ep = 1000
#USING A LARGE CUMULATIVE MINIMUM AVERAGE REWARD FOR THE CART POLE HERE
min_avg_Rwd = 1000000 # Minimum average reward to consider the problem as solved
n_avg_ep = 100 # Number of consecutive episodes to calculate the average reward
# t = get_last_folder_id(results_dir_prefix) + 1 # Calculate next test id
# results_dir = results_dir_prefix + '\\' + str(t).zfill(4)
# os.makedirs(results_dir)
def run_episode(env,
agent,
state_normalizer,
memory,
batch_size,
discount,
max_step=10000):
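    """Run one episode: act eps-greedily, store transitions in replay memory,
    and train the value network on sampled minibatches (DQN)."""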
state = env.reset()
if state_normalizer is not None:
state = state_normalizer.transform(state)[0]
done = False
total_reward = 0
step_durations_s = np.zeros(shape=max_step, dtype=float)
train_duration_s = np.zeros(shape=max_step-batch_size, dtype=float)
progress_msg = "Step {:5d}/{:5d}. Avg step duration: {:3.1f} ms. Avg train duration: {:3.1f} ms. Loss = {:2.10f}."
loss_v = 0
w1_m = 0
w2_m = 0
w3_m = 0
i = 0
action = 0
for i in range(max_step):
t = time.time()
if i > 0 and i % 200 == 0:
print(progress_msg.format(i, max_step,
np.mean(step_durations_s[0:i])*1000,
np.mean(train_duration_s[0:i-batch_size])*1000,
loss_v))
if done:
break
action = agent.act(state)
state_next, reward, done, info = env.step(action)
total_reward += reward
if state_normalizer is not None:
state_next = state_normalizer.transform(state_next)[0]
memory.add((state, action, reward, state_next, done))
if len(memory.memory) > batch_size: # DQN Experience Replay
states_b, actions_b, rewards_b, states_n_b, done_b = zip(*memory.sample(batch_size))
states_b = np.array(states_b)
actions_b = np.array(actions_b)
rewards_b = np.array(rewards_b)
states_n_b = np.array(states_n_b)
done_b = np.array(done_b).astype(int)
q_n_b = agent.predict_q_values(states_n_b) # Action values on the arriving state
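            # TD(0) target per transition: r + discount * max_a Q(s_next, a); the
            # bootstrap term is zeroed on terminal transitions via (1. - done_b).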
targets_b = rewards_b + (1. - done_b) * discount * np.amax(q_n_b, axis=1)
targets = agent.predict_q_values(states_b)
for j, action in enumerate(actions_b):
targets[j, action] = targets_b[j]
t_train = time.time()
loss_v, w1_m, w2_m, w3_m = agent.train(states_b, targets)
train_duration_s[i - batch_size] = time.time() - t_train
state = copy.copy(state_next)
step_durations_s[i] = time.time() - t # Time elapsed during this step
return loss_v, w1_m, w2_m, w3_m, total_reward
env = gym.make("CartPole-v1")
n_actions = env.action_space.n
state_dim = env.observation_space.high.shape[0]
value_function = ValueFunctionDQN(state_dim=state_dim, n_actions=n_actions, batch_size=batch_size)
agent = AgentEpsGreedy(n_actions=n_actions, value_function_model=value_function, eps=0.9)
memory = ReplayMemory(max_size=100000)
Experiments = 3
Experiments_All_Rewards = np.zeros(shape=(max_n_ep))
for e in range(Experiments):
print('Experiment Number ', e)
loss_per_ep = []
w1_m_per_ep = []
w2_m_per_ep = []
w3_m_per_ep = []
total_reward = []
ep = 0
avg_Rwd = -np.inf
episode_end_msg = 'loss={:2.10f}, w1_m={:3.1f}, w2_m={:3.1f}, w3_m={:3.1f}, total reward={}'
while avg_Rwd < min_avg_Rwd and ep < max_n_ep:
if ep >= n_avg_ep:
avg_Rwd = np.mean(total_reward[ep-n_avg_ep:ep])
print("EPISODE {}. Average reward over the last {} episodes: {}.".format(ep, n_avg_ep, avg_Rwd))
else:
print("EPISODE {}.".format(ep))
loss_v, w1_m, w2_m, w3_m, cum_R = run_episode(env, agent, None, memory, batch_size=batch_size, discount=discount,
max_step=15000)
print(episode_end_msg.format(loss_v, w1_m, w2_m, w3_m, cum_R))
if agent.eps > 0.0001:
agent.eps *= decay_eps
# Collect episode results
loss_per_ep.append(loss_v)
w1_m_per_ep.append(w1_m)
w2_m_per_ep.append(w2_m)
w3_m_per_ep.append(w3_m)
total_reward.append(cum_R)
ep += 1
Experiments_All_Rewards = Experiments_All_Rewards + total_reward
np.save('/Users/Riashat/Documents/PhD_Research/BASIC_ALGORITHMS/My_Implementations/gym_examples/DQN_Experiments/Average_DQN_CartPole_V1_Results/' + 'CartPole_V1_cumulative_reward' + 'Experiment_' + str(e) + '.npy', total_reward)
np.save('/Users/Riashat/Documents/PhD_Research/BASIC_ALGORITHMS/My_Implementations/gym_examples/DQN_Experiments/Average_DQN_CartPole_V1_Results/' + 'CartPole_V1_value_function_loss' + 'Experiment_' + str(e) + '.npy', loss_per_ep)
env.close()
print('Saving Average Cumulative Rewards Over Experiments')
Average_Cum_Rwd = np.divide(Experiments_All_Rewards, Experiments)
np.save('/Users/Riashat/Documents/PhD_Research/BASIC_ALGORITHMS/My_Implementations/gym_examples/DQN_Experiments/Average_DQN_CartPole_V1_Results/' + 'Average_Cum_Rwd_CartPole_V1' + '.npy', Average_Cum_Rwd)
print "All Experiments DONE"
#####################
#PLOT RESULTS
eps = range(ep)
plt.figure()
plt.subplot(211)
plt.plot(eps, Average_Cum_Rwd)
Rwd_avg = movingaverage(Average_Cum_Rwd, 100)
plt.plot(eps[len(eps) - len(Rwd_avg):], Rwd_avg)
plt.xlabel("Episode number")
plt.ylabel("Reward per episode")
plt.grid(True)
plt.title("Total reward")
| [
"[email protected]"
]
| |
838df2bb911c2fe314b5652625d5a57027bb1644 | 3eed7b405d1fb9305225718f4c01494bc856da88 | /swExpertAcademy/python_basic/6262_set,dict.py | 9975b904fe77de00c41287c94b3333c4c28cc26a | []
| no_license | tesschung/TIL | b2610d714ec9e4778aafe7538d02b3e3d9fe89d9 | bd79e4af68ef19599cb843f3ce8f5d81a322a2f7 | refs/heads/master | 2020-06-17T11:53:52.595880 | 2019-07-26T08:02:03 | 2019-07-26T08:02:03 | 195,916,443 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | """
다음의 결과와 같이 입력된 문자열의 문자 빈도수를 구하는 프로그램을 작성하십시오.
입력
abcdefgabc
출력
a,2
b,2
c,2
d,1
e,1
f,1
g,1
"""
## the dict counting idiom (worth memorizing)
a = 'abcdefgabc'
a = list(a)
print(a)
alphabet_count={}
for alphabet in a:
if alphabet in alphabet_count:
alphabet_count[alphabet] += 1
else:
alphabet_count[alphabet] = 1
print(alphabet_count)
print(alphabet_count['a'])
print(alphabet_count.keys())
print(alphabet_count.values())
# key와 value를 한꺼번에 for문을 반복하려면 items() 를 사용합니다.
for key, val in alphabet_count.items():
print('{},{}'.format(key,val))
""" 실수
for alphabet_one in alphabet_count.keys():
print(alphabet_one)
for alphabet_num in alphabet_count.values():
if alphabet_one == alphabet_one:
print(alphabet_num)
print(alphabet_one,",",alphabet_num)
""" | [
"[email protected]"
]
| |
5c80ef81d655783a11e72b107e7460ec06e90969 | 6f3389c93cf1057bca5398940932561c19dbec1d | /Solving Club/2/Diamond.py | cf87ccb41f7082bb50c22b4f1782141b2dcfe91f | []
| no_license | Jeonseoghyeon/APR | 0af9ac1b4ba666a97d78b92e3e599c5a8bc87acc | 7a3822913b84ae6ecf80c8e35c7c8e400981d1fe | refs/heads/master | 2020-12-23T13:55:24.194463 | 2020-06-30T07:00:23 | 2020-06-30T07:00:23 | 237,172,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import sys
sys.stdin = open("Pascal input.txt","r")
tc = int(input())
for i in range(1,tc+1):
word = input()
lw = len(word)
print('..#.'*(lw)+'.', end='')
print()
print('.#'*(lw*2)+'.')
for x in range(lw):
print('#',end='')
print('.{}.'.format(word[x]),end='')
print('#')
print('.#'*(lw*2)+'.')
print('..#.'*(lw)+'.') | [
"[email protected]"
]
| |
41587d72a71685d71fb47ce204224c2ce9746626 | edbd6ee1c76ed53c24c711ca68d643c94ac97481 | /Algorithms/binary/268_Missing Number.py | 954f204785803a1de2690446d79ae59cd39b43e9 | []
| no_license | Linjiayu6/LeetCode | dd0ee7cff962fce7298227d9ce52ed6700fed98b | 2dea64cb7198bd7c71ba023d074b1036088d5632 | refs/heads/master | 2021-07-22T21:44:23.536275 | 2020-07-09T00:56:28 | 2020-07-09T00:56:28 | 194,610,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py |
# -*- coding: utf-8 -*-
"""
Input: [9,6,4,2,3,5,7,0,1]
Output: 8
i: [1,2]
o: 0
i: [1]
o: 0
"""
# exceed the time limit
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
for i in range(0, len(nums) + 1):
            # membership tests on a list are much slower than on a hash table
if i not in nums:
return i
return len(nums)
# hash tables: dict and set lookups are much faster than list lookups
class Solution1(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# hash table
dictionary = set(nums)
for i in range(len(nums) + 1):
if i not in dictionary:
return i
return len(nums)
# math approach (sum formula); this is the best of the three methods
class Solution3(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
return n * (n + 1) / 2 - sum(nums)
# print Solution3().missingNumber([0,1,2,3])
# bit manipulation: XOR approach
"""
indices: 0,1,2
values:  1,2,0
XOR the length with every (index ^ value) pair; duplicates cancel out:
3^(0^1)^(1^2)^(2^0)
= (0^0)^(1^1)^(2^2)^3
= 3
"""
class Solution4(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
miss = len(nums)
for index, data in enumerate(nums):
miss = miss ^ index ^ data
return miss | [
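# Illustrative check (not part of the original submission): the approaches
# should agree on the sample input from the module docstring.
if __name__ == '__main__':
    sample = [9, 6, 4, 2, 3, 5, 7, 0, 1]
    print(Solution1().missingNumber(sample))  # expected: 8
    print(Solution4().missingNumber(sample))  # expected: 8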
"[email protected]"
]
| |
a4df40171c0718ad80625bd65a1955dc68217bff | c46f6015a2b9f7c6e5aec3d043f0b75057756852 | /scripts/create_initial_admin_user.py | 228fdaa760d9de5715af2b45428539b5623ed244 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | peerau/byceps | 6a7db0f8db00d8a77e824018d6efdbab57abbaaf | 1f691280f5c086179ce372a471a0f1d9952a86f5 | refs/heads/master | 2020-07-28T06:31:29.468607 | 2019-09-15T00:18:48 | 2019-09-15T00:18:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | #!/usr/bin/env python
"""Create an initial user with admin privileges to begin BYCEPS setup.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.authorization import service as authorization_service
from byceps.services.user import command_service as user_command_service
from byceps.services.user import creation_service as user_creation_service
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.option('--screen_name', prompt=True)
@click.option('--email_address', prompt=True)
@click.option('--password', prompt=True, hide_input=True)
def execute(screen_name, email_address, password):
click.echo(f'Creating user "{screen_name}" ... ', nl=False)
user = _create_user(screen_name, email_address, password)
click.secho('done.', fg='green')
click.echo(f'Enabling user "{screen_name}" ... ', nl=False)
user_command_service.enable_user(user.id, user.id)
click.secho('done.', fg='green')
roles = _get_roles()
click.echo(f'Assigning {len(roles)} roles to user "{screen_name}" ... ',
nl=False)
_assign_roles_to_user(roles, user.id)
click.secho('done.', fg='green')
def _create_user(screen_name, email_address, password):
try:
return user_creation_service \
.create_basic_user(screen_name, email_address, password)
except ValueError as e:
raise click.UsageError(e)
def _get_roles():
return authorization_service.get_all_roles_with_titles()
def _assign_roles_to_user(roles, user_id):
for role in roles:
authorization_service.assign_role_to_user(role.id, user_id)
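# Illustrative invocation (added for clarity; values are made up). The BYCEPS
# config file name is read from the environment by
# get_config_filename_from_env_or_exit(), and any option not given on the
# command line is prompted for interactively (the password prompt hides input):
#
#   $ python create_initial_admin_user.py --screen_name admin \
#       --email_address admin@example.org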
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
| [
"[email protected]"
]
| |
8bcc2361f25b202d1c70ed1e2f5956b8c8c17c64 | bb99cf88152edea3bf6d190bf8f8b7947a97f5b4 | /truffe2/accounting_tools/migrations/0013_auto__add_field_invoiceline_value_ttc.py | d2c9672e843f55f7618087f4edcf874683435b86 | [
"BSD-2-Clause"
]
| permissive | ArcaniteSolutions/truffe2 | ec8c9a621ea4330b0076a7f1db2b319b262926ce | 5406842eb9719d7fdae7137ebd1918f2de61459c | refs/heads/master | 2021-06-13T12:03:23.271274 | 2020-10-05T09:08:31 | 2020-10-05T09:08:31 | 40,756,384 | 10 | 13 | BSD-2-Clause | 2020-11-25T07:58:58 | 2015-08-15T11:34:38 | Python | UTF-8 | Python | false | false | 15,418 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'InvoiceLine.value_ttc'
db.add_column(u'accounting_tools_invoiceline', 'value_ttc',
self.gf('django.db.models.fields.DecimalField')(default=42, max_digits=20, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'InvoiceLine.value_ttc'
db.delete_column(u'accounting_tools_invoiceline', 'value_ttc')
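    # Usage note (added for clarity): this is a South (pre-Django-1.7) migration.
    # With South installed it would typically be applied with something like
    #   ./manage.py migrate accounting_tools 0013_auto__add_field_invoiceline_value_ttc
    # and reversed by migrating back to the previous migration (commands shown
    # for illustration only).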
models = {
u'accounting_core.accountingyear': {
'Meta': {'object_name': 'AccountingYear'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_preparing'", 'max_length': '255'}),
'subvention_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting_core.costcenter': {
'Meta': {'unique_together': "(('name', 'accounting_year'), ('account_number', 'accounting_year'))", 'object_name': 'CostCenter'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_tools.invoice': {
'Meta': {'object_name': 'Invoice'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'annex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'costcenter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.CostCenter']"}),
'custom_bvr_number': ('django.db.models.fields.CharField', [], {'max_length': '59', 'null': 'True', 'blank': 'True'}),
'date_and_place': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'display_account': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_bvr': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'ending': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'greetins': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preface': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sign': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_tools.invoiceline': {
'Meta': {'object_name': 'InvoiceLine'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['accounting_tools.Invoice']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'quantity': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '20', 'decimal_places': '0'}),
'tva': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value_ttc': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
u'accounting_tools.invoicelogging': {
'Meta': {'object_name': 'InvoiceLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.Invoice']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.invoicetag': {
'Meta': {'object_name': 'InvoiceTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'to': u"orm['accounting_tools.Invoice']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'accounting_tools.subvention': {
'Meta': {'unique_together': "(('unit', 'unit_blank_name', 'accounting_year'),)", 'object_name': 'Subvention'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'amount_asked': ('django.db.models.fields.SmallIntegerField', [], {}),
'amount_given': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment_root': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'mobility_asked': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mobility_given': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'unit_blank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'unit_blank_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']", 'null': 'True', 'blank': 'True'})
},
u'accounting_tools.subventionfile': {
'Meta': {'object_name': 'SubventionFile'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': u"orm['accounting_tools.Subvention']"}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploader': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.subventionline': {
'Meta': {'object_name': 'SubventionLine'},
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nb_spec': ('django.db.models.fields.SmallIntegerField', [], {}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'subvention': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['accounting_tools.Subvention']"})
},
u'accounting_tools.subventionlogging': {
'Meta': {'object_name': 'SubventionLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.Subvention']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'units.unit': {
'Meta': {'object_name': 'Unit'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['accounting_tools'] | [
"[email protected]"
]
| |
ea5a7731a98289b59c7c8bfd0a7930fa7f4814f7 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /tools/perf/process_perf_results.py | aa7bab4f494ea31a90218756322700bf608450ae | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"APSL-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"MIT",
"Zlib"
]
| permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 24,394 | py | #!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import json
import logging
import multiprocessing
import os
import shutil
import sys
import tempfile
import time
import uuid
from core import path_util
from core import upload_results_to_perf_dashboard
from core import results_merger
path_util.AddAndroidPylibToPath()
try:
from pylib.utils import logdog_helper
except ImportError:
pass
RESULTS_URL = 'https://chromeperf.appspot.com'
# Until we are migrated to LUCI, we will be utilizing a hard
# coded master name based on what is passed in in the build properties.
# See crbug.com/801289 for more details.
MACHINE_GROUP_JSON_FILE = os.path.join(
path_util.GetChromiumSrcDir(), 'tools', 'perf', 'core',
'perf_dashboard_machine_group_mapping.json')
JSON_CONTENT_TYPE = 'application/json'
# Cache of what data format (ChartJSON, Histograms, etc.) each results file is
# in so that only one disk read is required when checking the format multiple
# times.
_data_format_cache = {}
DATA_FORMAT_GTEST = 'gtest'
DATA_FORMAT_CHARTJSON = 'chartjson'
DATA_FORMAT_HISTOGRAMS = 'histograms'
DATA_FORMAT_UNKNOWN = 'unknown'
# See https://crbug.com/923564.
# We want to switch over to using histograms for everything, but converting from
# the format output by gtest perf tests to histograms has introduced several
# problems. So, only perform the conversion on tests that are whitelisted and
# are okay with potentially encountering issues.
GTEST_CONVERSION_WHITELIST = [
'xr.vr.common_perftests',
]
def _GetMachineGroup(build_properties):
machine_group = None
if build_properties.get('perf_dashboard_machine_group', False):
# Once luci migration is complete this will exist as a property
# in the build properties
machine_group = build_properties['perf_dashboard_machine_group']
else:
mastername_mapping = {}
with open(MACHINE_GROUP_JSON_FILE) as fp:
mastername_mapping = json.load(fp)
legacy_mastername = build_properties['mastername']
if mastername_mapping.get(legacy_mastername):
machine_group = mastername_mapping[legacy_mastername]
if not machine_group:
raise ValueError(
'Must set perf_dashboard_machine_group or have a valid '
'mapping in '
'src/tools/perf/core/perf_dashboard_machine_group_mapping.json'
'See bit.ly/perf-dashboard-machine-group for more details')
return machine_group
def _upload_perf_results(json_to_upload, name, configuration_name,
build_properties, service_account_file, output_json_file):
"""Upload the contents of result JSON(s) to the perf dashboard."""
args= [
'--buildername', build_properties['buildername'],
'--buildnumber', build_properties['buildnumber'],
'--name', name,
'--configuration-name', configuration_name,
'--results-file', json_to_upload,
'--results-url', RESULTS_URL,
'--got-revision-cp', build_properties['got_revision_cp'],
'--got-v8-revision', build_properties['got_v8_revision'],
'--got-webrtc-revision', build_properties['got_webrtc_revision'],
'--output-json-file', output_json_file,
'--perf-dashboard-machine-group', _GetMachineGroup(build_properties)
]
is_luci = False
buildbucket = build_properties.get('buildbucket', {})
if isinstance(buildbucket, basestring):
buildbucket = json.loads(buildbucket)
if ('build' in buildbucket and
buildbucket['build'].get('bucket') == 'luci.chrome.ci'):
is_luci = True
if is_luci and _is_gtest(json_to_upload) and (
name in GTEST_CONVERSION_WHITELIST):
path_util.AddTracingToPath()
from tracing.value import ( # pylint: disable=no-name-in-module
gtest_json_converter)
gtest_json_converter.ConvertGtestJsonFile(json_to_upload)
_data_format_cache[json_to_upload] = DATA_FORMAT_HISTOGRAMS
if 'build' in buildbucket:
args += [
'--project', buildbucket['build'].get('project'),
'--buildbucket', buildbucket['build'].get('bucket'),
]
if service_account_file and not is_luci:
args += ['--service-account-file', service_account_file]
if build_properties.get('git_revision'):
args.append('--git-revision')
args.append(build_properties['git_revision'])
if _is_histogram(json_to_upload):
args.append('--send-as-histograms')
return upload_results_to_perf_dashboard.main(args)
def _is_histogram(json_file):
return _determine_data_format(json_file) == DATA_FORMAT_HISTOGRAMS
def _is_gtest(json_file):
return _determine_data_format(json_file) == DATA_FORMAT_GTEST
def _determine_data_format(json_file):
if json_file not in _data_format_cache:
with open(json_file) as f:
data = json.load(f)
if isinstance(data, list):
_data_format_cache[json_file] = DATA_FORMAT_HISTOGRAMS
elif isinstance(data, dict):
if 'charts' in data:
_data_format_cache[json_file] = DATA_FORMAT_CHARTJSON
else:
_data_format_cache[json_file] = DATA_FORMAT_GTEST
else:
_data_format_cache[json_file] = DATA_FORMAT_UNKNOWN
return _data_format_cache[json_file]
  # Cache hit: return the format determined on a previous call (re-assigning
  # DATA_FORMAT_UNKNOWN here would clobber the cached value).
  return _data_format_cache[json_file]
def _merge_json_output(output_json, jsons_to_merge, extra_links):
"""Merges the contents of one or more results JSONs.
Args:
output_json: A path to a JSON file to which the merged results should be
written.
jsons_to_merge: A list of JSON files that should be merged.
extra_links: a (key, value) map in which keys are the human-readable strings
which describe the data, and value is logdog url that contain the data.
"""
begin_time = time.time()
merged_results = results_merger.merge_test_results(jsons_to_merge)
# Only append the perf results links if present
if extra_links:
merged_results['links'] = extra_links
with open(output_json, 'w') as f:
json.dump(merged_results, f)
end_time = time.time()
print_duration('Merging json test results', begin_time, end_time)
return 0
def _handle_perf_json_test_results(
benchmark_directory_map, test_results_list):
begin_time = time.time()
benchmark_enabled_map = {}
for benchmark_name, directories in benchmark_directory_map.iteritems():
for directory in directories:
# Obtain the test name we are running
is_ref = '.reference' in benchmark_name
enabled = True
try:
with open(os.path.join(directory, 'test_results.json')) as json_data:
json_results = json.load(json_data)
if not json_results:
# Output is null meaning the test didn't produce any results.
# Want to output an error and continue loading the rest of the
# test results.
print 'No results produced for %s, skipping upload' % directory
continue
if json_results.get('version') == 3:
# Non-telemetry tests don't have written json results but
# if they are executing then they are enabled and will generate
# chartjson results.
if not bool(json_results.get('tests')):
enabled = False
if not is_ref:
# We don't need to upload reference build data to the
# flakiness dashboard since we don't monitor the ref build
test_results_list.append(json_results)
except IOError as e:
# TODO(crbug.com/936602): Figure out how to surface these errors. Should
# we have a non-zero exit code if we error out?
logging.error('Failed to obtain test results for %s: %s',
benchmark_name, e)
if not enabled:
# We don't upload disabled benchmarks or tests that are run
# as a smoke test
print 'Benchmark %s disabled' % benchmark_name
benchmark_enabled_map[benchmark_name] = enabled
end_time = time.time()
print_duration('Analyzing perf json test results', begin_time, end_time)
return benchmark_enabled_map
def _generate_unique_logdog_filename(name_prefix):
return name_prefix + '_' + str(uuid.uuid4())
def _handle_perf_logs(benchmark_directory_map, extra_links):
""" Upload benchmark logs to logdog and add a page entry for them. """
begin_time = time.time()
benchmark_logs_links = collections.defaultdict(list)
for benchmark_name, directories in benchmark_directory_map.iteritems():
for directory in directories:
benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
if os.path.exists(benchmark_log_file):
with open(benchmark_log_file) as f:
uploaded_link = logdog_helper.text(
name=_generate_unique_logdog_filename(benchmark_name),
data=f.read())
benchmark_logs_links[benchmark_name].append(uploaded_link)
logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
logdog_stream = logdog_helper.text(
logdog_file_name, json.dumps(benchmark_logs_links, sort_keys=True,
indent=4, separators=(',', ': ')),
content_type=JSON_CONTENT_TYPE)
extra_links['Benchmarks logs'] = logdog_stream
end_time = time.time()
print_duration('Generating perf log streams', begin_time, end_time)
def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
begin_time = time.time()
with open(benchmarks_shard_map_file) as f:
benchmarks_shard_data = json.load(f)
logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
logdog_stream = logdog_helper.text(
logdog_file_name, json.dumps(benchmarks_shard_data, sort_keys=True,
indent=4, separators=(',', ': ')),
content_type=JSON_CONTENT_TYPE)
extra_links['Benchmarks shard map'] = logdog_stream
end_time = time.time()
print_duration('Generating benchmark shard map stream', begin_time, end_time)
def _get_benchmark_name(directory):
return os.path.basename(directory).replace(" benchmark", "")
def process_perf_results(output_json, configuration_name,
service_account_file,
build_properties, task_output_dir,
smoke_test_mode, output_results_dir):
"""Process perf results.
Consists of merging the json-test-format output, uploading the perf test
output (chartjson and histogram), and store the benchmark logs in logdog.
Each directory in the task_output_dir represents one benchmark
that was run. Within this directory, there is a subdirectory with the name
of the benchmark that was run. In that subdirectory, there is a
perftest-output.json file containing the performance results in histogram
or dashboard json format and an output.json file containing the json test
results for the benchmark.
Returns:
(return_code, upload_results_map):
return_code is 0 if the whole operation is successful, non zero otherwise.
benchmark_upload_result_map: the dictionary that describe which benchmarks
were successfully uploaded.
"""
begin_time = time.time()
return_code = 0
benchmark_upload_result_map = {}
directory_list = [
f for f in os.listdir(task_output_dir)
if not os.path.isfile(os.path.join(task_output_dir, f))
]
benchmark_directory_list = []
benchmarks_shard_map_file = None
for directory in directory_list:
for f in os.listdir(os.path.join(task_output_dir, directory)):
path = os.path.join(task_output_dir, directory, f)
if os.path.isdir(path):
benchmark_directory_list.append(path)
elif path.endswith('benchmarks_shard_map.json'):
benchmarks_shard_map_file = path
# Now create a map of benchmark name to the list of directories
# the lists were written to.
benchmark_directory_map = {}
for directory in benchmark_directory_list:
benchmark_name = _get_benchmark_name(directory)
if benchmark_name in benchmark_directory_map.keys():
benchmark_directory_map[benchmark_name].append(directory)
else:
benchmark_directory_map[benchmark_name] = [directory]
test_results_list = []
build_properties = json.loads(build_properties)
if not configuration_name:
# we are deprecating perf-id crbug.com/817823
configuration_name = build_properties['buildername']
extra_links = {}
# First, upload benchmarks shard map to logdog and add a page
# entry for it in extra_links.
if benchmarks_shard_map_file:
_handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)
# Second, upload all the benchmark logs to logdog and add a page entry for
# those links in extra_links.
_handle_perf_logs(benchmark_directory_map, extra_links)
# Then try to obtain the list of json test results to merge
# and determine the status of each benchmark.
benchmark_enabled_map = _handle_perf_json_test_results(
benchmark_directory_map, test_results_list)
if not smoke_test_mode:
try:
return_code, benchmark_upload_result_map = _handle_perf_results(
benchmark_enabled_map, benchmark_directory_map,
configuration_name, build_properties, service_account_file,
extra_links, output_results_dir)
except Exception:
logging.exception('Error handling perf results jsons')
return_code = 1
# Finally, merge all test results json, add the extra links and write out to
# output location
_merge_json_output(output_json, test_results_list, extra_links)
end_time = time.time()
print_duration('Total process_perf_results', begin_time, end_time)
return return_code, benchmark_upload_result_map
def _merge_chartjson_results(chartjson_dicts):
merged_results = chartjson_dicts[0]
for chartjson_dict in chartjson_dicts[1:]:
for key in chartjson_dict:
if key == 'charts':
for add_key in chartjson_dict[key]:
merged_results[key][add_key] = chartjson_dict[key][add_key]
return merged_results
def _merge_histogram_results(histogram_lists):
merged_results = []
for histogram_list in histogram_lists:
merged_results += histogram_list
return merged_results
def _merge_perf_results(benchmark_name, results_filename, directories):
begin_time = time.time()
collected_results = []
for directory in directories:
filename = os.path.join(directory, 'perf_results.json')
try:
with open(filename) as pf:
collected_results.append(json.load(pf))
except IOError as e:
# TODO(crbug.com/936602): Figure out how to surface these errors. Should
# we have a non-zero exit code if we error out?
logging.error('Failed to obtain perf results from %s: %s',
directory, e)
if not collected_results:
logging.error('Failed to obtain any perf results from %s.',
benchmark_name)
return
# Assuming that multiple shards will only be chartjson or histogram set
# Non-telemetry benchmarks only ever run on one shard
merged_results = []
if isinstance(collected_results[0], dict):
merged_results = _merge_chartjson_results(collected_results)
elif isinstance(collected_results[0], list):
merged_results =_merge_histogram_results(collected_results)
with open(results_filename, 'w') as rf:
json.dump(merged_results, rf)
end_time = time.time()
print_duration(('%s results merging' % (benchmark_name)),
begin_time, end_time)
def _upload_individual(
benchmark_name, directories, configuration_name,
build_properties, output_json_file, service_account_file):
tmpfile_dir = tempfile.mkdtemp()
try:
upload_begin_time = time.time()
# There are potentially multiple directores with results, re-write and
# merge them if necessary
results_filename = None
if len(directories) > 1:
merge_perf_dir = os.path.join(
os.path.abspath(tmpfile_dir), benchmark_name)
if not os.path.exists(merge_perf_dir):
os.makedirs(merge_perf_dir)
results_filename = os.path.join(
merge_perf_dir, 'merged_perf_results.json')
_merge_perf_results(benchmark_name, results_filename, directories)
else:
# It was only written to one shard, use that shards data
results_filename = os.path.join(directories[0], 'perf_results.json')
results_size_in_mib = os.path.getsize(results_filename) / (2 ** 20)
print 'Uploading perf results from %s benchmark (size %s Mib)' % (
benchmark_name, results_size_in_mib)
with open(output_json_file, 'w') as oj:
upload_return_code = _upload_perf_results(
results_filename,
benchmark_name, configuration_name, build_properties,
service_account_file, oj)
upload_end_time = time.time()
print_duration(('%s upload time' % (benchmark_name)),
upload_begin_time, upload_end_time)
return (benchmark_name, upload_return_code == 0)
finally:
shutil.rmtree(tmpfile_dir)
def _upload_individual_benchmark(params):
try:
return _upload_individual(*params)
except Exception:
benchmark_name = params[0]
upload_succeed = False
logging.exception('Error uploading perf result of %s' % benchmark_name)
return benchmark_name, upload_succeed
def _GetCpuCount(log=True):
try:
return multiprocessing.cpu_count()
except NotImplementedError:
if log:
logging.warn(
'Failed to get a CPU count for this bot. See crbug.com/947035.')
# TODO(crbug.com/948281): This is currently set to 4 since the mac masters
# only have 4 cores. Once we move to all-linux, this can be increased or
# we can even delete this whole function and use multiprocessing.cpu_count()
# directly.
return 4
def _handle_perf_results(
benchmark_enabled_map, benchmark_directory_map, configuration_name,
build_properties, service_account_file, extra_links,
output_results_dir):
"""
Upload perf results to the perf dashboard.
This method also upload the perf results to logdog and augment it to
|extra_links|.
Returns:
(return_code, benchmark_upload_result_map)
return_code is 0 if this upload to perf dashboard successfully, 1
otherwise.
benchmark_upload_result_map is a dictionary describes which benchmark
was successfully uploaded.
"""
begin_time = time.time()
# Upload all eligible benchmarks to the perf dashboard
results_dict = {}
invocations = []
for benchmark_name, directories in benchmark_directory_map.iteritems():
if not benchmark_enabled_map.get(benchmark_name, False):
continue
# Create a place to write the perf results that you will write out to
# logdog.
output_json_file = os.path.join(
output_results_dir, (str(uuid.uuid4()) + benchmark_name))
results_dict[benchmark_name] = output_json_file
invocations.append((
benchmark_name, directories, configuration_name,
build_properties, output_json_file, service_account_file))
# Kick off the uploads in multiple processes
pool = multiprocessing.Pool(_GetCpuCount())
try:
async_result = pool.map_async(
_upload_individual_benchmark, invocations)
# TODO(crbug.com/947035): What timeout is reasonable?
results = async_result.get(timeout=4000)
except multiprocessing.TimeoutError:
logging.error('Failed uploading benchmarks to perf dashboard in parallel')
results = []
for benchmark_name in benchmark_directory_map:
results.append((benchmark_name, False))
finally:
pool.terminate()
# Keep a mapping of benchmarks to their upload results
benchmark_upload_result_map = {}
for r in results:
benchmark_upload_result_map[r[0]] = r[1]
logdog_dict = {}
upload_failures_counter = 0
logdog_stream = None
logdog_label = 'Results Dashboard'
for benchmark_name, output_file in results_dict.iteritems():
upload_succeed = benchmark_upload_result_map[benchmark_name]
if not upload_succeed:
upload_failures_counter += 1
is_reference = '.reference' in benchmark_name
_write_perf_data_to_logfile(
benchmark_name, output_file,
configuration_name, build_properties, logdog_dict,
is_reference, upload_failure=not upload_succeed)
logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
logdog_stream = logdog_helper.text(logdog_file_name,
json.dumps(dict(logdog_dict), sort_keys=True,
indent=4, separators=(',', ': ')),
content_type=JSON_CONTENT_TYPE)
if upload_failures_counter > 0:
logdog_label += (' %s merge script perf data upload failures' %
upload_failures_counter)
extra_links[logdog_label] = logdog_stream
end_time = time.time()
print_duration('Uploading results to perf dashboard', begin_time, end_time)
if upload_failures_counter > 0:
return 1, benchmark_upload_result_map
return 0, benchmark_upload_result_map
def _write_perf_data_to_logfile(benchmark_name, output_file,
configuration_name, build_properties,
logdog_dict, is_ref, upload_failure):
viewer_url = None
# logdog file to write perf results to
if os.path.exists(output_file):
output_json_file = logdog_helper.open_text(benchmark_name)
with open(output_file) as f:
try:
results = json.load(f)
json.dump(results, output_json_file,
indent=4, separators=(',', ': '))
except ValueError:
print ('Error parsing perf results JSON for benchmark %s' %
benchmark_name)
output_json_file.close()
viewer_url = output_json_file.get_viewer_url()
else:
print ("Perf results JSON file doesn't exist for benchmark %s" %
benchmark_name)
base_benchmark_name = benchmark_name.replace('.reference', '')
if base_benchmark_name not in logdog_dict:
logdog_dict[base_benchmark_name] = {}
# add links for the perf results and the dashboard url to
# the logs section of buildbot
if is_ref:
logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
if upload_failure:
logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
else:
logdog_dict[base_benchmark_name]['dashboard_url'] = (
upload_results_to_perf_dashboard.GetDashboardUrl(
benchmark_name,
configuration_name, RESULTS_URL,
build_properties['got_revision_cp'],
_GetMachineGroup(build_properties)))
logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
if upload_failure:
logdog_dict[base_benchmark_name]['upload_failed'] = 'True'
def print_duration(step, start, end):
print 'Duration of %s: %d seconds' % (step, end-start)
def main():
""" See collect_task.collect_task for more on the merge script API. """
print sys.argv
parser = argparse.ArgumentParser()
# configuration-name (previously perf-id) is the name of bot the tests run on
# For example, buildbot-test is the name of the android-go-perf bot
# configuration-name and results-url are set in the json file which is going
# away tools/perf/core/chromium.perf.fyi.extras.json
parser.add_argument('--configuration-name', help=argparse.SUPPRESS)
parser.add_argument('--service-account-file', help=argparse.SUPPRESS,
default=None)
parser.add_argument('--build-properties', help=argparse.SUPPRESS)
parser.add_argument('--summary-json', help=argparse.SUPPRESS)
parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
parser.add_argument('-o', '--output-json', required=True,
help=argparse.SUPPRESS)
parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
parser.add_argument('--smoke-test-mode', action='store_true',
help='This test should be run in smoke test mode'
' meaning it does not upload to the perf dashboard')
args = parser.parse_args()
output_results_dir = tempfile.mkdtemp('outputresults')
try:
return_code, _ = process_perf_results(
args.output_json, args.configuration_name,
args.service_account_file,
args.build_properties, args.task_output_dir,
args.smoke_test_mode, output_results_dir)
return return_code
finally:
shutil.rmtree(output_results_dir)
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
]
| |
e84db5466d085d1a44c4c8a2befd42eb2cc6ca59 | 0cba5529e387ba0f077b4e8ddeb96f914004f5df | /malaya/text/entity/food.py | b01c4d47015e3e2bee4c9c0b6edd8425e7db06a0 | [
"MIT"
]
| permissive | AsyrafAzlan/Malaya | dc78398ee6880578f40c5646a48882a5913217ae | 3d5166173cf74881f7a56fffaaf391813c55d4f1 | refs/heads/master | 2021-05-21T22:47:41.863857 | 2020-04-03T15:00:21 | 2020-04-03T15:00:21 | 252,841,526 | 1 | 0 | MIT | 2020-04-03T21:04:44 | 2020-04-03T21:04:44 | null | UTF-8 | Python | false | false | 9,550 | py | # https://en.wikipedia.org/wiki/List_of_Malaysian_dishes#Malaysian_dishes
malay = {
'ambuyat',
'asam pedas',
'ayam bakar',
'ayam goreng',
'ayam masak kicap',
'ayam masak merah',
'bubur asyura',
'bubur kacang hijau',
'bubur lambuk',
'bubur pedas',
'gulai',
'ikan bakar',
'ikan goreng',
'kangkung belacan',
'ketupat',
'laksa',
'lemang',
'lontong',
'mee bandung muar',
'nasi ambeng',
'nasi campur',
'nasi dagang',
'nasi goreng',
'nasi goreng pattaya',
'nasi kerabu',
'nasi lemak',
'nasi tumpang',
'nasi ulam',
'otak otak',
'pulot tartal',
'rendang',
'rojak',
'roti jala',
'satay',
'satay celup',
'sup kambing',
'ulam',
'nasi ayam',
}
chinese = {
'bak kut teh',
'bakkwa',
'banmian',
'bean sprouts chicken',
'bubur pulut hitam',
'chai tow kway',
'char kway teow',
'char siu',
'chee cheong fun',
'chwee kueh',
'claypot chicken rice',
'duck soup noodles',
'economy rice',
'fish ball',
'hae mee',
'hainanese chicken rice',
'hokkien mee',
'kaya toast',
'kolo mee',
'loh bak',
'lor mee',
'mee pok',
'mixed rice',
'oyster omelette',
'pan mee',
'pao',
'popiah',
'tong sui',
'wonton mee',
'yong tau foo',
'you char kway',
'yusheng',
'asam laksa',
'bubur cha cha',
'cap cai',
'curry mee',
'laksa lemak',
'mee siam',
'nyonya bak chang',
}
indian = {
'adhirasam',
'appam',
'bajji',
'banana leaf rice',
'chapati',
'curd rice',
'dalcha',
'dosai',
'fish head curry',
'fish molee',
'idli',
'korma',
'maggi goreng',
'mamak',
'mee goreng',
'mee goreng mamak',
'mee rebus',
'murtabak',
'murukku',
'naan',
'nasi biryani',
'nasi kandar',
'paniyaram',
'papadum',
'pasembur',
'payasam',
'pongal',
'puliyodarai',
'puri',
'putu mayam',
'rasam',
'rojak',
'roti canai',
'roti prata',
'roti tissue',
'sambar',
'sup kambing',
'upma',
'vadai',
}
sabah_sarawak = {
'bosou',
'hinava',
'linatan',
'linongot',
'manok pansoh',
'midin',
'nasi kombos',
'nasi laru',
'nasik aruk',
'sinalau bakas',
'tonokon',
'tuhau',
'umai',
}
kuih = {
'ang ku',
'bahulu',
'kek batik',
'heong peng',
'kek sarawak',
'amplang',
'lekor',
'mee siput muar',
'rempeyek',
'apam balik',
'bingka',
'borasa',
'cakoi',
'kuih cincin',
'cucur',
'dodol',
'kuih gelang',
'kuih jala',
'kuih jelurut',
'kuih jemput jemput',
'karipap',
'kuih kelupis',
'kochi',
'penyaram',
'pisang goreng',
'kuih gulung',
'kuih kasturi',
'kuih lapis',
'kuih lidah',
'kuih makmur',
'ondeh ondeh',
'otokon',
'kuih pais',
'pie tee',
'pulut inti',
'pulut panggang',
'putu bambu',
'putu bumbong',
'putu mangkuk',
'kuih gulong',
'kuih kapit',
'kuih sapit',
'seri muka',
}
dessert = {'lamban', 'punjung', 'tapai', 'wajid'}
malaysian_food = malay | chinese | indian | sabah_sarawak | kuih | dessert
hot_ice_beverage = {
'teh',
'milo',
'neslo',
'kopi',
'soya',
'susu',
'sirap',
'kosong',
}
hot_ice_beverage_regex = '(?:\s*|ice|hot|cold|ais|panas|sejuk)\s*(?:susu|teh|kosong|soya|kopi|neslo|milo|sirap|coffee|tea)\s*(?:o|\s*)\s*(?:ice|sejuk|hot|panas|cold|ais|\s*)\s*(?:tarik|pull|)\\b'
fruit_juice_regex = '(?:juice|jus)\s*(?:durian|kelapa|manggis|strawberry|nanas|langsat|nangka|cempedak|kedondong|pisang|laici|betik|tembikai|jambu|ciku|nenas|mangga|orange|watermelon|rambutan|strawberri|apple|guava|epal|oren|mango|longan|asam|belimbing|carrot|limau)|(?:durian|kelapa|manggis|strawberry|nanas|langsat|nangka|cempedak|kedondong|pisang|laici|betik|tembikai|jambu|ciku|nenas|mangga|orange|watermelon|rambutan|strawberri|apple|guava|epal|oren|mango|longan|asam|belimbing|carrot|limau)\s*(?:juice|jus)\\b'
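# Minimal usage sketch (illustrative only; the sample strings are made up):
#
#   import re
#   re.search(hot_ice_beverage_regex, 'saya nak teh o ais tarik')  # matches
#   re.search(fruit_juice_regex, 'jus epal sejuk satu')            # matches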
unique_beverage = {
'ais kacang',
'asam boi',
'leng chee kang',
'barley ais',
'tau hua',
'cendol',
'bubur cha bha',
'tong sui',
'tealive',
'bubble tea',
'milk tea',
'chatime',
'boba',
'laici kang',
}
unique_beverage_regex = '(?:' + '|'.join(list(unique_beverage)) + ')'
fruits = {
'durian',
'manggis',
'longan',
'laici',
'rambutan',
'langsat',
'cempedak',
'guava',
'jambu',
'betik',
'belimbing',
'tembikai',
'watermelon',
'nangka',
'strawberry',
'strawberri',
'mangga',
'mango',
'nanas',
'nenas',
'ciku',
'apple',
'epal',
'carrot',
'pisang',
'kelapa',
'limau',
'asam',
'kedondong',
'oren',
'orange',
}
american = {
'hotdog',
'pie',
'cookie',
'cobbler',
'nacho',
'stew',
'barbecue',
'lime pie',
'tots',
'tater',
'sourdough',
'cobb',
'salad',
'pot roast',
'twinkies',
'jerky',
'fajitas',
'banana split',
'cornbread',
'jambalaya',
'steak',
'salmon',
'meatloaf',
'grit',
'macaroni',
'cheese',
'crabcake',
'chip',
'cioppino',
'bean',
'popcorn',
'ayam goreng',
'fried chicken',
'waffle',
}
italian = {
'pizza',
'focaccia',
'carbonara',
'pizza',
'risotto',
'pasta',
'noodle',
'tiramisu',
'lasagne',
}
# https://en.wikipedia.org/wiki/Indonesian_cuisine
indonesian = {
'acar',
'ayam bakar',
'ayam goreng',
'ayam kecap',
'bubur ayam',
'bubur kacang hijau',
'bubur ketan hitam',
'gado gado',
'ikan asin',
'ikan bakar',
'ikan goreng',
'ketupat',
'lontong',
'nasi bakar',
'nasi campur',
'nasi goreng',
'nasi kuning',
'otak otak',
'perkedel',
'pindang',
'rijsttafel',
'rujak',
'sambal',
'sambal goreng udang',
'sate',
'semur',
'sop buntut',
'soto',
'soto mie',
'tahu goreng',
'telur pindang',
'tempeh goreng',
'tempe goreng',
'tumis kangkung',
}
acehnese = {'mie aceh', 'mie caluk', 'nasi gurih', 'roti canai', 'timphan'}
balinese = {'babi guling', 'betutu', 'lawar', 'nasi bali', 'sate lilit'}
bataknese = {
'arsik',
'babi panggang',
'bentidali ni horbo',
'dengke mas na niura',
'itak gurgur',
'lampet',
'manuk napinadar',
'na nidugu',
'na tinombur',
'ombusombus',
'pohulpohul',
'saksang',
'sambal',
'sasagun',
'tanggotanggo',
'tipatipa',
'tuktuk',
}
betawi = {
'asinan betawi',
'kerak telor',
'ketoprak',
'ketupat sayur',
'laksa betawi',
'mie kangkung',
'nasi uduk',
'nasi ulam',
'soto betawi',
}
bugis_makassar = {
'burasa',
'coto makassar',
'gogos',
'kapurung',
'konro',
'pallubasa',
'sop saudara',
}
cirebonese = {
'docang',
'empal gentong',
'mie koclok',
'sega jamblang',
'sega lengko',
'tahu gejrot',
}
chinese_peranakan = {
'babi kecap',
'bakmi',
'bakpau',
'bakso',
'cap cai',
'fu yung hai',
'i fu mie',
'kepiting saus tiram',
'kwetiau goreng',
'laksa',
'lontong cap go meh',
'lumpia',
'mie ayam',
'mie goreng',
'mie kering',
'mun tahu',
'nasi tim',
'sapo tahu',
'sekba',
'siomay',
'swikee',
}
javanese = {
'ayam penyet',
'botok',
'buntil',
'gudeg',
'iga penyet',
'krechek',
'kuluban',
'mie jawa',
'mie rebus',
'nasi ambeng',
'nasi bogana',
'nasi kucing',
'nasi liwet',
'nasi pecel',
'opor',
'pecel',
'pecel lele',
'rambak petis',
'rawon',
'sayur lodeh',
'selat solo',
'serundeng',
'tahu campur',
'tongseng',
'trancam',
'tumpeng',
'urap',
}
minangkabau = {
'asam pedas',
'balado',
'dendeng',
'galamai',
'gulai',
'kepiting saus padang',
'keripik sanjay',
'lemang',
'nasi kapau',
'nasi padang',
'rendang',
'sate padang',
'soto padang',
'udang balado',
}
palembang = {
'burgo',
'laksan',
'lakso',
'mie celor',
'nasi minyak',
'pempek',
'pindang',
'tekwan',
'tempoyak',
}
sasak = {'ayam taliwang', 'beberuk terong', 'plecing kangkung'}
sundanese = {
'asinan bogor',
'bandung',
'batagor',
'empal gepuk',
'karedok',
'kupat tahu',
'laksa bogor',
'lalab',
'mie kocok',
'nasi timbel',
'oncom',
'pepes',
'sate bandeng',
'sayur asem',
'seblak',
'soto',
'tahu sumedang',
'tauge goreng',
}
middle_eastern = {
'hummus',
'manakeesh',
'halloumi',
'meddamas',
'falafel',
'tabouleh',
'moutabal',
'ghanoush',
'fattoush',
'ummali',
'shanklish',
'shawarma',
'tawook',
'dolma',
'kofta',
'quwarmah',
'mansaf',
'kebab',
'baklava',
'knafeh',
'masgouf',
}
indonesian_food = (
indonesian
| acehnese
| balinese
| bataknese
| betawi
| bugis_makassar
| cirebonese
| chinese_peranakan
| javanese
| minangkabau
| palembang
| sasak
| sundanese
)
total_foods = (
malaysian_food | indonesian_food | american | italian | middle_eastern
)
total_foods_regex = '(?:' + '|'.join(list(total_foods)) + ')'
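
# Minimal usage sketch (illustrative; the sample sentence is made up and the
# __main__ guard keeps the demo from running on import):
if __name__ == '__main__':
    import re

    sample = 'saya makan nasi lemak dan roti canai'
    print(re.findall(total_foods_regex, sample))  # e.g. ['nasi lemak', 'roti canai']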
| [
"[email protected]"
]
| |
0ea7d01fd4094a8bf1e86275477930c3ab0eda4f | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/translate/hooks.py | 34fbbf2dc37439bcabeac9f790eaad94761c8833 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 6,520 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declarative hooks for ml speech."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base as calliope_base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import files
SPEECH_API = 'translate'
def _GetApiVersion(args):
if args.calliope_command.ReleaseTrack() == calliope_base.ReleaseTrack.BETA:
return 'v3'
else:
return 'v3beta1'
class Error(exceptions.Error):
"""Exceptions for this module."""
class ContentFileError(Error):
"""Error if content file can't be read and isn't a GCS URL."""
def UpdateRequestLangDetection(unused_instance_ref, args, request):
"""The hook to inject content into the language detection request."""
content = args.content
content_file = args.content_file
messages = apis.GetMessagesModule(SPEECH_API, _GetApiVersion(args))
detect_language_request = messages.DetectLanguageRequest()
project = properties.VALUES.core.project.GetOrFail()
request.parent = 'projects/{}/locations/{}'.format(project, args.zone)
if args.IsSpecified('model'):
project = properties.VALUES.core.project.GetOrFail()
model = 'projects/{}/locations/{}/models/language-detection/{}'.format(
project, args.zone, args.model)
detect_language_request.model = model
if content_file:
if os.path.isfile(content_file):
detect_language_request.content = files.ReadFileContents(content_file)
else:
raise ContentFileError(
'Could not find --content-file [{}]. Content file must be a path '
'to a local file)'.format(content_file))
else:
detect_language_request.content = content
if args.IsSpecified('mime_type'):
detect_language_request.mimeType = args.mime_type
request.detectLanguageRequest = detect_language_request
return request
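# Note added for context (an assumption, not stated in this file): each function
# here follows the calliope modify-request-hook signature
# (unused_instance_ref, args, request) and is typically referenced by name from
# the command's declarative YAML spec, roughly like:
#
#   request:
#     modify_request_hooks:
#     - googlecloudsdk.command_lib.ml.translate.hooks:UpdateRequestLangDetection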
def UpdateRequestTranslateText(unused_instance_ref, args, request):
"""The hook to inject content into the translate request."""
content = args.content
content_file = args.content_file
messages = apis.GetMessagesModule(SPEECH_API, _GetApiVersion(args))
translate_text_request = messages.TranslateTextRequest()
project = properties.VALUES.core.project.GetOrFail()
request.parent = 'projects/{}/locations/{}'.format(project, args.zone)
if args.IsSpecified('model'):
project = properties.VALUES.core.project.GetOrFail()
model = 'projects/{}/locations/{}/models/{}'.format(
project, args.zone, args.model)
translate_text_request.model = model
if content_file:
if os.path.isfile(content_file):
translate_text_request.contents = [files.ReadFileContents(content_file)]
else:
raise ContentFileError(
'Could not find --content-file [{}]. Content file must be a path '
'to a local file)'.format(content_file))
else:
translate_text_request.contents = [content]
if args.IsSpecified('mime_type'):
translate_text_request.mimeType = args.mime_type
if args.IsSpecified('glossary_config'):
    translate_text_request.glossaryConfig = \
        messages.TranslateTextGlossaryConfig(glossary=args.glossary_config)
if args.IsSpecified('source_language'):
translate_text_request.sourceLanguageCode = args.source_language
translate_text_request.targetLanguageCode = args.target_language
request.translateTextRequest = translate_text_request
return request
def UpdateRequestGetSupportedLanguages(unused_instance_ref, args, request):
"""The hook to inject content into the getSupportedLanguages request."""
project = properties.VALUES.core.project.GetOrFail()
request.parent = 'projects/{}/locations/{}'.format(project, args.zone)
if args.IsSpecified('model'):
model = 'projects/{}/locations/{}/models/{}'.format(
project, args.zone, args.model)
request.model = model
return request
def UpdateRequestBatchTranslateText(unused_instance_ref, args, request):
"""The hook to inject content into the batch translate request."""
messages = apis.GetMessagesModule(SPEECH_API, _GetApiVersion(args))
batch_translate_text_request = messages.BatchTranslateTextRequest()
project = properties.VALUES.core.project.GetOrFail()
request.parent = 'projects/{}/locations/{}'.format(project, args.zone)
batch_translate_text_request.sourceLanguageCode = args.source_language
batch_translate_text_request.targetLanguageCodes = args.target_language_codes
batch_translate_text_request.outputConfig = messages.OutputConfig(
gcsDestination=messages.GcsDestination(outputUriPrefix=args.destination))
batch_translate_text_request.inputConfigs = \
[messages.InputConfig(gcsSource=messages.GcsSource(inputUri=k),
mimeType=v if v else None)
for k, v in sorted(args.source.items())]
if args.IsSpecified('models'):
batch_translate_text_request.models = \
messages.BatchTranslateTextRequest.ModelsValue(
additionalProperties=[
messages.BatchTranslateTextRequest.ModelsValue.AdditionalProperty(
key=k, value='projects/{}/locations/{}/models/{}'.format(
project, args.zone, v)) for k, v in sorted(args.models.items())
]
)
if args.IsSpecified('glossaries'):
additional_properties = \
[messages.BatchTranslateTextRequest.GlossariesValue.AdditionalProperty(
key=k, value=messages.TranslateTextGlossaryConfig(
glossary='projects/{}/locations/{}/glossaries/{}'.format(project, args.zone, v))) for k, v in sorted(args.glossaries.items())]
batch_translate_text_request.glossaries = \
messages.BatchTranslateTextRequest.GlossariesValue(
additionalProperties=additional_properties)
request.batchTranslateTextRequest = batch_translate_text_request
return request
| [
"[email protected]"
]
| |
93da478736f08c7706cfa5c4f92ee7e9ede8832a | 005f02cb534bbf91fe634fcf401441e1179365c8 | /9-Django Level 1/9.4-Template/TempApp/urls.py | dc94c5c67cb793a125ce2ecc85fce9a6e060985f | []
| no_license | Ruxhino-B/django-deployment-example | 220a39a456871a1bf42a64fd5b945731056fc7b9 | e19713ac1e11af202152ad20d7c3c94891a77e83 | refs/heads/master | 2020-04-18T02:21:10.505691 | 2020-01-06T14:18:18 | 2020-01-06T14:25:25 | 167,159,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.urls import path
from TempApp import views
urlpatterns = [
path('',views.index, name = 'index')
]
| [
"[email protected]"
]
| |
4dd73191d9ab91cd9d86ab25481a619eed427197 | e9988eb38fd515baa386d8b06bb7cce30c34c50d | /sitevenv/lib/python2.7/site-packages/django/db/models/sql/query.py | 54394ea4e2f47197536dd8406b1cab8e3c5d7604 | []
| no_license | Arrrrrrrpit/Hire_station | 8c2f293677925d1053a4db964ee504d78c3738d8 | f33f044628082f1e034484b5c702fd66478aa142 | refs/heads/master | 2020-07-01T01:24:18.190530 | 2016-09-25T20:33:05 | 2016-09-25T20:33:05 | 201,007,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94,733 | py | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import warnings
from collections import Counter, Iterator, Mapping, OrderedDict
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.query_utils import (
PathInfo, Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, QUERY_TERMS, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, EmptyResultSet, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None, context=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
self.context = context or {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params, context=self.context.copy())
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %s>" % self
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
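    # Illustrative sketch (not part of Django itself): __str__ simply
    # interpolates the raw parameters into the SQL string, e.g. for a
    # hypothetical table "app_book":
    #
    #   >>> rq = RawQuery("SELECT * FROM app_book WHERE id = %s", "default", params=[3])
    #   >>> str(rq)
    #   'SELECT * FROM app_book WHERE id = 3'
    #
    # Proper quoting/adaptation of values only happens later, in
    # _execute_query().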
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = dict((key, adapter(val)) for key, val in six.iteritems(self.params))
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
class Query(object):
"""
A single SQL query.
"""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
query_terms = QUERY_TERMS
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A list of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = []
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
self.context = {}
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def annotations(self):
if self._annotations is None:
self._annotations = OrderedDict()
return self._annotations
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def _prepare(self, field):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = self.where.clone()
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
elif self.group_by is True:
obj.group_by = True
else:
obj.group_by = self.group_by[:]
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_related = self.select_related
obj.values_select = self.values_select[:]
obj._annotations = self._annotations.copy() if self._annotations is not None else None
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.max_depth = self.max_depth
obj._extra = self._extra.copy() if self._extra is not None else None
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
if 'alias_prefix' in self.__dict__:
obj.alias_prefix = self.alias_prefix
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
obj.context = self.context.copy()
return obj
def add_context(self, key, value):
self.context[key] = value
def get_context(self, key, default=None):
return self.context.get(key, default)
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, Col):
# Reference to column. Make sure the referenced column
# is selected.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_exprs.append(Ref(col_alias, expr))
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
has_limit = self.low_mark != 0 or self.high_mark is not None
has_existing_annotations = any(
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
)
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, then those operations must be
# done in a subquery so that we are aggregating on the limit and/or
# distinct results instead of applying the distinct and limit after the
# aggregation.
if (isinstance(self.group_by, list) or has_limit or has_existing_annotations or
self.distinct):
from django.db.models.sql.subqueries import AggregateQuery
outer_query = AggregateQuery(self.model)
inner_query = self.clone()
inner_query.select_for_update = False
inner_query.select_related = False
if not has_limit and not self.distinct_fields:
# Queries with distinct_fields need ordering and when a limit
# is applied we must take the slice from the ordered query.
# Otherwise no need for ordering.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_annotations:
inner_query.group_by = [self.model._meta.pk.get_col(inner_query.get_initial_alias())]
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.tables}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == [] and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = [self.model._meta.pk.get_col(inner_query.get_initial_alias())]
try:
outer_query.add_subquery(inner_query, using)
except EmptyResultSet:
return {
alias: None
for alias in outer_query.annotation_select
}
else:
outer_query = self
self.select = []
self.default_cols = False
self._extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None for q in outer_query.annotation_select.items()]
converters = compiler.get_converters(outer_query.annotation_select.values())
result = compiler.apply_converters(result, converters)
return {
alias: val
for (alias, annotation), val
in zip(outer_query.annotation_select.items(), result)
}
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
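    # Usage sketch (assumed, for illustration; "Book" is a hypothetical model):
    # get_count() is what backs QuerySet.count(). Roughly,
    #
    #   >>> q = Query(Book)
    #   >>> q.get_count(using='default')   # ~ SELECT COUNT(*) FROM app_book
    #
    # It clones the query, annotates Count('*') under the '__count' alias and
    # reads that single aggregate back through get_aggregation().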
def has_filters(self):
return self.where
def has_results(self, using):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
q.set_group_by()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
        Merge the 'rhs' query into the current one, with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.tables)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
for alias in rhs.tables[1:]:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col in rhs.select:
self.add_select(col.relabeled_clone(change_map))
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self._extra and rhs._extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in six.iteritems(seen):
for field in model._meta.fields:
if field in values:
continue
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in six.iteritems(must_include):
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in six.iteritems(workset):
callback(target, model, values)
else:
for model, values in six.iteritems(must_include):
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in six.iteritems(seen):
callback(target, model, values)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
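    # Aliasing sketch (alias names below are illustrative): the first reference
    # to a table uses the table name itself as the alias, and only additional
    # joins against the same table get a generated alias built from
    # alias_prefix ('T') and the current size of alias_map, e.g. 'app_book'
    # first, then something like 'T2' for a second join on the same table (the
    # exact number depends on how many aliases already exist).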
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Recursively promotes the join type of the given aliases and their
        children to an outer join. A join is only promoted if it is nullable
        or the parent join is already an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map.keys()
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
        This method will reset reference counts for aliases so that they match
        the values passed in the 'to_counts' mapping.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
def relabel_column(col):
if isinstance(col, (list, tuple)):
old_alias = col[0]
return (change_map.get(old_alias, old_alias), col[1])
else:
return col.relabeled_clone(change_map)
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, list):
self.group_by = [relabel_column(col) for col in self.group_by]
self.select = [col.relabeled_clone(change_map) for col in self.select]
if self._annotations:
self._annotations = OrderedDict(
(key, relabel_column(col)) for key, col in self._annotations.items())
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in six.iteritems(change_map):
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
self.external_aliases = {change_map.get(alias, alias)
for alias in self.external_aliases}
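    # For illustration (alias names assumed): given change_map = {'T2': 'T4'},
    # every reference to 'T2' in the select list, where clause, annotations,
    # alias_map, alias_refcount, tables and external_aliases is rewritten to
    # 'T4'. The assertion above guarantees no alias is both a key and a value,
    # so a single pass can never cascade renames.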
def bump_prefix(self, outer_query):
"""
Changes the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generates a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
local_recursion_limit = 127 # explicitly avoid infinite loop
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RuntimeError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
change_map = OrderedDict()
for pos, alias in enumerate(self.tables):
new_alias = '%s%d' % (self.alias_prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
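    # Sketch of the sequence (assuming the default alias_prefix 'T'):
    # prefix_gen() yields 'U' first, then walks 'U'..'Z' followed by the
    # Cartesian products 'AA', 'AB', ... The first candidate not already taken
    # by a subquery becomes the new alias_prefix, and all table aliases are
    # relabelled to 'U0', 'U1', ... so inner and outer query aliases can never
    # collide.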
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Returns an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, join_cols) where 'lhs' is either an existing
table alias or a table name. 'join_cols' is a tuple of tuples containing
columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds
to the SQL equivalent of::
lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
The 'reuse' parameter can be either None which means all joins
(matching the connection) are reusable, or it can be a set containing
the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure we do not generate chains like t1 LOUTER t2 INNER t3. All new
joins are created as LOUTER if nullable is True.
If 'nullable' is True, the join can potentially involve NULL values and
is a candidate for promotion (to "left outer") when combining querysets.
The 'join_field' is the field we are joining along (if any).
"""
reuse = [a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join]
if reuse:
self.ref_alias(reuse[0])
return reuse[0]
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Makes sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in the base chain with no parents;
            # assign the new options object and skip to the next base in that
            # case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
_, _, _, joins, _ = self.setup_joins(
[link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False):
"""
Adds a single annotation expression to the Query
"""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
self.annotations[alias] = annotation
def prepare_lookup_value(self, value, lookups, can_reuse, allow_joins=True):
# Default lookup if none given is exact.
used_joins = []
if len(lookups) == 0:
lookups = ['exact']
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookups[-1] not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
lookups[-1] = 'isnull'
value = True
elif hasattr(value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
used_joins = [k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)]
# Subqueries need to use a different set of aliases than the
# outer query. Call bump_prefix to change aliases of the inner
# query (the value).
if hasattr(value, 'query') and hasattr(value.query, 'bump_prefix'):
value = value._clone()
value.query.bump_prefix(self)
if hasattr(value, 'bump_prefix'):
value = value.clone()
value.bump_prefix(self)
# For Oracle '' is equivalent to null. The check needs to be done
# at this stage because join promotion can't be done at compiler
# stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
# can do here. Similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookups[-1] == 'exact' and value == ''):
value = True
lookups[-1] = 'isnull'
return value, lookups, used_joins
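    # Behaviour sketch (argument values below are only for illustration): a
    # None value with an 'exact'/'iexact' lookup is rewritten into an isnull
    # test, e.g.
    #
    #   >>> self.prepare_lookup_value(None, ['exact'], can_reuse=None)
    #   (True, ['isnull'], [])
    #
    # while an expression value (an F() object, a queryset, ...) is first
    # resolved against this query so that any joins it adds show up in
    # used_joins.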
def solve_lookup_type(self, lookup):
"""
        Solve the lookup type from the lookup (e.g. 'foobar__id__icontains')
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) == 0:
lookup_parts = ['exact']
elif len(lookup_parts) > 1:
if not field_parts:
raise FieldError(
                    'Invalid lookup "%s" for model "%s".' %
(lookup, self.get_meta().model.__name__))
return lookup_parts, field_parts, False
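    # Illustration (assuming 'author' and 'name' resolve to real fields):
    # 'author__name__icontains' splits into lookup_parts ['icontains'] and
    # field_parts ['author', 'name'], while a bare 'author__name' falls back to
    # the default ['exact'] lookup. If the first part names an annotation
    # instead, the remaining parts are returned as lookups on that expression.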
def check_query_object_type(self, value, opts, field):
"""
Checks whether the object passed while querying is of the correct type.
If not, it raises a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""
Checks the type of object passed to query relations.
"""
if field.is_relation:
# QuerySets implement is_compatible_query_object_type() to
# determine compatibility with the given field.
if hasattr(value, 'is_compatible_query_object_type'):
if not value.is_compatible_query_object_type(opts, field):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.model_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def build_lookup(self, lookups, lhs, rhs):
"""
Tries to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
lookups = lookups[:]
while lookups:
name = lookups[0]
# If there is just one part left, try first get_lookup() so
# that if the lhs supports both transform and lookup for the
# name, then lookup will be picked.
if len(lookups) == 1:
final_lookup = lhs.get_lookup(name)
if not final_lookup:
# We didn't find a lookup. We are going to interpret
# the name as transform, and do an Exact lookup against
# it.
lhs = self.try_transform(lhs, name, lookups)
final_lookup = lhs.get_lookup('exact')
return final_lookup(lhs, rhs)
lhs = self.try_transform(lhs, name, lookups)
lookups = lookups[1:]
def try_transform(self, lhs, name, rest_of_lookups):
"""
Helper method for build_lookup. Tries to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted." %
(name, lhs.output_field.__class__.__name__))
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, connector=AND, allow_joins=True, split_subq=True):
"""
Builds a WhereNode for a single filter clause, but doesn't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
        The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
value, lookups, used_joins = self.prepare_lookup_value(value, lookups, can_reuse, allow_joins)
clause = self.where_class()
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
field, sources, opts, join_list, path = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(field, value, opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_list
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
if can_reuse is not None:
can_reuse.update(join_list)
used_joins = set(used_joins).union(set(join_list))
targets, alias, join_list = self.trim_joins(sources, join_list, path)
if field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
assert num_lookups > 0 # Likely a bug in Django if this fails.
lookup_class = field.get_lookup(lookups[0])
if len(targets) == 1:
lhs = targets[0].get_col(alias, field)
else:
lhs = MultiColSource(alias, targets, sources, field)
condition = lookup_class(lhs, value)
lookup_type = lookup_class.lookup_name
else:
col = targets[0].get_col(alias, field)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and value is True and not current_negated
if current_negated and (lookup_type != 'isnull' or value is False):
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
clause.add(lookup_class(targets[0].get_col(alias, sources[0]), False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
existing_inner = set(
(a for a in self.alias_map if self.alias_map[a].join_type == INNER))
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True):
"""
Adds a Q-object to the current filter.
"""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated, allow_joins, split_subq)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, connector=connector,
allow_joins=allow_joins, split_subq=split_subq,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walks the list of names and turns them into PathInfo tuples. Note that
a single name in 'names' can generate multiple PathInfos (m2m for
example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Returns a list of PathInfo tuples. In addition returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field). Finally, the method returns
those names that weren't found (which are likely transforms and the
final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif pos == 0:
for rel in opts.related_objects:
if (name == rel.related_model._meta.model_name and
rel.related_name == rel.related_model._meta.default_related_name):
related_name = rel.related_name
field = opts.get_field(related_name)
warnings.warn(
"Query lookup '%s' is deprecated in favor of "
"Meta.default_related_name '%s'."
% (name, related_name),
RemovedInDjango20Warning, 2
)
break
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
field_names = list(get_field_names_from_opts(opts))
available = sorted(field_names + list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
# The field lives on a base class of the current model.
# Skip the chain of proxy to the concrete proxied model
proxied_model = opts.concrete_model
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
cur_names_with_path[1].append(
PathInfo(final_field.model._meta, opts, targets, final_field, False, True)
)
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Returns the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets, rest = self.names_to_path(
names, opts, allow_many, fail_on_missing=True)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(opts.db_table, alias, None, INNER, join.join_field, nullable)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
return final_field, targets, opts, joins, path
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contain the PathInfos
used to create the joins.
Returns the final target field and table alias and the new active
joins.
We will always trim any direct join if we have the target column
available already in the previous table. Reverse joins can't be
trimmed as we don't know if there is anything on the other side of
the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = set(t.column for t in info.join_field.foreign_related_fields)
cur_targets = set(t.column for t in targets)
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
if not allow_joins and LOOKUP_SEP in name:
raise FieldError("Joined field references are not permitted in this query")
if name in self.annotations:
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
return Ref(name, self.annotation_select[name])
else:
return self.annotation_select[name]
else:
field_list = name.split(LOOKUP_SEP)
field, sources, opts, join_list, path = self.setup_joins(
field_list, self.get_meta(),
self.get_initial_alias(), reuse)
targets, _, join_list = self.trim_joins(sources, join_list, path)
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
if reuse is not None:
reuse.update(join_list)
col = targets[0].get_col(join_list[-1], sources[0])
return col
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth it to consider using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
# Add extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
col = query.select[0]
select_field = col.target
alias = col.alias
if self.is_nullable(select_field):
lookup_class = select_field.get_lookup('isnull')
lookup = lookup_class(select_field.get_col(alias), False)
query.where.add(lookup, AND)
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases.add(alias)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
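    # Worked example (numbers are illustrative): on a fresh query,
    # set_limits(low=5, high=10) leaves low_mark=5 and high_mark=10, which the
    # compiler later renders roughly as "LIMIT 5 OFFSET 5". Calling it again
    # composes relative to the existing window, and an empty window
    # (low_mark == high_mark) marks the query as returning nothing at all.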
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""
Removes all fields from SELECT clause.
"""
self.select = []
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
self.values_select = []
def add_select(self, col):
self.default_cols = False
self.select.append(col)
def set_select(self, cols):
self.default_cols = False
self.select = cols
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there is no existing joins, use outer join.
_, targets, _, joins, path = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(targets, joins, path)
for target in targets:
self.add_select(target.get_col(final_alias))
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(list(get_field_names_from_opts(opts)) + list(self.extra) + list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for col in self.select:
self.group_by.append(col)
if self.annotation_select:
for alias, annotation in six.iteritems(self.annotation_select):
for col in annotation.get_group_by_cols():
self.group_by.append(col)
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = OrderedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is an OrderedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
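    # Example (illustrative): qs.extra(select={'is_adult': 'age >= %s'},
    # select_params=(18,)) pairs the single %s with 18, so self.extra ends up
    # holding {'is_adult': ('age >= %s', [18])}.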
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = field_names, False
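    # Example (illustrative): Model.objects.defer('body') routes through
    # add_deferred_loading({'body'}), while Model.objects.only('title') routes
    # through add_immediate_loading(('title',)); note above how a requested
    # 'pk' is translated to the concrete primary-key field name.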
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_deferred_field_names().
"""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"Set the mask of annotations that will actually be returned by the SELECT"
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(set(names).union(self.annotation_select_mask))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT,
we don't actually remove them from the Query since they might be used
later
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
@property
def annotation_select(self):
"""The OrderedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self._annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = OrderedDict(
(k, v) for k, v in self.annotations.items()
if k in self.annotation_select_mask
)
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trims joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also sets the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Returns a lookup usable for doing outerq.filter(lookup=self). Returns
also if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
# The path.join_field is a Rel, lets get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
# Lets still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
# values in select_fields. Lets punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.tables:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed:
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
def is_reverse_o2o(field):
"""
A little helper to check if the given field is reverse-o2o. The field is
expected to be some sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter(object):
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if rel_a join doesn't produce any results is null (for example
# reverse foreign key or null value in direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
| [
"[email protected]"
]
| |
51fdb88c14d2221f54fb2b293029612539b61752 | ced0efb0666b5817b9656cd533cf6f5db0085fe8 | /coding/codejam/apac/Aug/3.py | 2ca01808b5e807bc2132b1dfc20b4781743052a4 | []
| no_license | adithyaphilip/learning | 11fb6997ab3d613a358502dfff0ae9b91cd5ee27 | 64ecd3bc622077c7256df91cdf4dfbc8adf23068 | refs/heads/master | 2021-06-01T18:04:46.733092 | 2016-09-22T18:22:46 | 2016-09-22T18:22:46 | 68,949,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | chunks_d = {}
def max_chunks(n, m):
    if n not in chunks_d:
        sum_c = (n ** 2) % m
        for i in range(n - 2, 0, -1):
            sum_c += (max_chunks(i, m) ** 2) % m
            sum_c %= m
        # cache the result so repeated calls become dictionary lookups
        chunks_d[n] = sum_c
    return chunks_d[n]
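# Illustrative values, assuming the memoised recursion above is the intended
# behaviour: max_chunks(1, 1000) == 1, max_chunks(2, 1000) == 4, and
# max_chunks(4, 1000) == (4**2 + max_chunks(2, 1000)**2 + max_chunks(1, 1000)**2) % 1000 == 33,
# with every intermediate result cached in chunks_d.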
def main():
pass
main()
| [
"[email protected]"
]
| |
8dafc9589c5f2c11426ea99411bd79160cfa6565 | 287df0fc88095f616bc0ed6f806de96ff2ca7509 | /spikeforest_widgets/widgets/MultiSort/__init__.py | d005165ec9030f4e498aa734c9bb13f5181463d0 | [
"Apache-2.0"
]
| permissive | flatironinstitute/spikeforest2 | 05b1171d0c98c431be3319865c9cd85cfa8e8779 | 9e77a7ac1a6d6eade9d7bbf6d60676ce6bb32eec | refs/heads/master | 2022-11-12T22:28:23.819206 | 2022-11-08T16:47:55 | 2022-11-08T16:47:55 | 224,878,139 | 30 | 9 | Apache-2.0 | 2020-06-24T19:26:48 | 2019-11-29T15:23:36 | Python | UTF-8 | Python | false | false | 32 | py | from .MultiSort import MultiSort | [
"[email protected]"
]
| |
41af7f6a19d875d44349cb7afca4084535e46ebd | 1af49694004c6fbc31deada5618dae37255ce978 | /content/test/gpu/run_gpu_integration_test_fuchsia.py | 7e33505aa105b1e959de8d8e632e53300b81a4f0 | [
"BSD-3-Clause"
]
| permissive | sadrulhc/chromium | 59682b173a00269ed036eee5ebfa317ba3a770cc | a4b950c23db47a0fdd63549cccf9ac8acd8e2c41 | refs/heads/master | 2023-02-02T07:59:20.295144 | 2020-12-01T21:32:32 | 2020-12-01T21:32:32 | 317,678,056 | 3 | 0 | BSD-3-Clause | 2020-12-01T21:56:26 | 2020-12-01T21:56:25 | null | UTF-8 | Python | false | false | 3,389 | py | #!/usr/bin/env vpython
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for running gpu integration tests on Fuchsia devices."""
import argparse
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from gpu_tests import path_util
sys.path.insert(0,
os.path.join(path_util.GetChromiumSrcDir(), 'build', 'fuchsia'))
from common_args import (AddCommonArgs, ConfigureLogging,
GetDeploymentTargetForArgs)
from symbolizer import RunSymbolizer
def main():
parser = argparse.ArgumentParser()
AddCommonArgs(parser)
args, gpu_test_args = parser.parse_known_args()
ConfigureLogging(args)
additional_target_args = {}
# If output_dir is not set, assume the script is being launched
# from the output directory.
if not args.out_dir:
args.out_dir = os.getcwd()
additional_target_args['out_dir'] = args.out_dir
# Create a temporary log file that Telemetry will look to use to build
# an artifact when tests fail.
temp_log_file = False
if not args.system_log_file:
args.system_log_file = os.path.join(tempfile.mkdtemp(), 'system-log')
temp_log_file = True
additional_target_args['system_log_file'] = args.system_log_file
package_names = ['web_engine_with_webui', 'web_engine_shell']
web_engine_dir = os.path.join(args.out_dir, 'gen', 'fuchsia', 'engine')
gpu_script = [
os.path.join(path_util.GetChromiumSrcDir(), 'content', 'test', 'gpu',
'run_gpu_integration_test.py')
]
# Pass all other arguments to the gpu integration tests.
gpu_script.extend(gpu_test_args)
try:
with GetDeploymentTargetForArgs(additional_target_args) as target:
target.Start()
fuchsia_device_address, fuchsia_ssh_port = target._GetEndpoint()
gpu_script.extend(['--chromium-output-directory', args.out_dir])
gpu_script.extend(['--fuchsia-device-address', fuchsia_device_address])
gpu_script.extend(['--fuchsia-ssh-config', target._GetSshConfigPath()])
if fuchsia_ssh_port:
gpu_script.extend(['--fuchsia-ssh-port', str(fuchsia_ssh_port)])
gpu_script.extend(['--fuchsia-system-log-file', args.system_log_file])
if args.verbose:
gpu_script.append('-v')
# Set up logging of WebEngine
listener = target.RunCommandPiped(['log_listener'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
build_ids_paths = map(
lambda package_name: os.path.join(
web_engine_dir, package_name, 'ids.txt'),
package_names)
RunSymbolizer(listener.stdout, open(args.system_log_file, 'w'),
build_ids_paths)
# Keep the Amber repository live while the test runs.
with target.GetAmberRepo():
# Install necessary packages on the device.
far_files = map(
lambda package_name: os.path.join(
web_engine_dir, package_name, package_name + '.far'),
package_names)
target.InstallPackage(far_files)
return subprocess.call(gpu_script)
finally:
if temp_log_file:
shutil.rmtree(os.path.dirname(args.system_log_file))
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
]
| |
57baaed90844c2af6223d7a0f17e4b0b8f44ed1a | 1a612ba00dd8cd5a1e505e2afda5c4512ebc6c2a | /src/iospec/types.py | f3b14bf8be4ae1b622e0a0f63b6017b91413c3e6 | []
| no_license | ricardogtx/iospec | b96ca052b4d8d75da02ae866d2171dafff0edd3c | 5dbf4d7640db9b97ce32ab081d9322361aff79a4 | refs/heads/master | 2021-01-18T04:02:18.836626 | 2016-05-16T02:30:42 | 2016-05-16T02:30:42 | 61,323,491 | 0 | 0 | null | 2016-06-16T20:24:18 | 2016-06-16T20:24:18 | null | UTF-8 | Python | false | false | 21,044 | py | import collections
import pprint
import copy
from generic import generic
__all__ = [
# Atomic
'Atom', 'Comment', 'In', 'Out', 'Command',
# Nodes
'IoSpec', 'TestCase', 'ErrorTestCase', 'IoTestCase', 'InputTestCase',
# Functions
'isequal', 'normalize'
]
#
# Atomic AST nodes
#
class Atom(collections.UserString):
"""Base class for all atomic elements"""
type = 'atom'
escape_chars = {
'<': '\\<',
'$': '\\$',
}
def __init__(self, data, *, lineno=None):
super().__init__(data)
self.lineno = lineno
def __str__(self):
return self.data
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.data)
def __eq__(self, other):
if type(self) is type(other):
return self.data == other.data
elif isinstance(other, str):
return self.data == other
return NotImplemented
def _escape(self, st):
for c, esc in self.escape_chars.items():
st = st.replace(c, esc)
return st
def _un_escape(self, st):
for c, esc in self.escape_chars.items():
st = st.replace(esc, c)
return st
def source(self):
"""Expand node as an iospec source code."""
return self._escape(self.data)
def copy(self):
"""Return a copy"""
return copy.copy(self)
def transform(self, func):
"""Return a transformed version of itself by the given function"""
new = copy.copy(self)
new.data = func(new.data)
return new
def to_json(self):
"""Return a pair of [type_name, data] that can be converted to valid
json."""
return type(self).__name__, str(self)
@classmethod
def from_json(cls, data):
"""Convert data created with to_json() back to a valid Atom object."""
klass = {
'In': In,
'Out': Out,
}[data[0]]
return klass(data[1])
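    # Round-trip sketch (illustrative): In('42').to_json() == ('In', '42') and
    # Atom.from_json(('In', '42')) rebuilds an equal In('42') atom.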
class Comment(Atom):
"""Represent a raw block of comments"""
def source(self):
return self.data
def content(self):
return '\n'.join(line[1:] for line in self.data.splitlines() if line)
class InOrOut(Atom):
"""Common interfaces to In and Out classes"""
def __init__(self, data, *, fromsource=False, lineno=None):
if fromsource:
data = self._un_escape(data)
super().__init__(data, lineno=lineno)
class In(InOrOut):
"""Plain input string"""
type = 'input'
def source(self):
return '<%s>\n' % super().source()
class Out(InOrOut):
"""Plain output string"""
type = 'output'
def source(self):
data = super().source()
lines = data.split('\n')
if any(self._requires_line_escape(line) for line in lines):
data = '\n'.join(self._line_escape(line) for line in lines)
return data
@staticmethod
def _requires_line_escape(line):
return (not line) or line[0] in '#|'
@staticmethod
def _line_escape(line):
return '||' + line if line.startswith('|') else '|' + line
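    # Example (illustrative): Out('#not a comment').source() emits the line as
    # "|#not a comment", so output starting with '#' or '|' is never mistaken
    # for iospec comment/markup syntax when the source is re-parsed.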
class Command(Atom):
"""A computed input of the form $name(args).
Parameters
----------
name : str
Name of the compute input
args : str
The string between parenthesis
factory : callable
A function that is used to generate new input values.
parsed_args : anything
The parsed argument string.
"""
type = 'input-command'
def __init__(self, name,
args=None, factory=None, parsed_args=None, lineno=None):
self.name = name
self.args = args
self.factory = factory or self.source
self.parsed_args = parsed_args
super().__init__('', lineno=lineno)
def __repr__(self):
if self.args is None:
return '%s(%r)' % (type(self).__name__, self.name)
else:
return '%s(%r, %r)' % (type(self).__name__, self.name, self.args)
@property
def data(self):
return self.source().rstrip('\n')
@data.setter
def data(self, value):
if value:
raise AttributeError('setting data to %r' % value)
def expand(self):
"""Expand command into a In() atom."""
return In(str(self.factory()), lineno=self.lineno)
def generate(self):
"""Generate a new value from the factory function."""
return self.factory()
def source(self):
if self.args is None:
return '$%s\n' % self.name
else:
escaped_args = self._escape(self.args)
return '$%s(%s)\n' % (self.name, escaped_args)
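# Example (illustrative): Command('read', args='2', factory=lambda: 42) renders
# as "$read(2)\n" via source() and expands to In('42') via expand().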
#
# Container nodes for the iospec AST
#
class LinearNode(collections.MutableSequence):
"""We call a single interaction/run of a program with a set of user inputs
a "test case".
There are different types of case nodes, either "error-*", for representing
failed executions, "input-*" for representing input-only specifications and
finally "io-*", that represents both inputs and outputs of a successful
program run.
"""
type = 'testcase'
def __init__(self, data=(), *, comment=None):
self._data = []
self.comment = (comment or '').strip()
self.meta = {}
if data:
self.extend(data)
def __iter__(self):
for x in self._data:
yield x
def __getitem__(self, idx):
if isinstance(idx, int):
return self._data[idx]
elif isinstance(idx, tuple):
data = self
for i in idx:
data = data[i]
return data
else:
raise IndexError(idx)
def __len__(self):
return len(self._data)
def __setitem__(self, i, value):
self._data[i] = value
def __delitem__(self, i):
del self._data[i]
def __repr__(self):
return super().__repr__()
def __eq__(self, other):
if type(self) is type(other):
return self.__dict__ == other.__dict__
return NotImplemented
def source(self):
"""Render AST node as iospec source code."""
data = ''.join(x.source() for x in self)
return self._with_comment(data)
def _with_comment(self, data):
if self.comment:
return '%s\n%s' % (self.comment, data)
return data
def insert(self, idx, value):
self._data.insert(idx, None)
try:
self[idx] = value
except:
del self._data[idx]
raise
def pformat(self, *args, **kwds):
"""Format AST in a pprint-like format."""
return pprint.pformat(self.json(), *args, **kwds)
def pprint(self, *args, **kwds):
"""Pretty print AST."""
print(self.pformat(*args, **kwds))
def json(self):
"""JSON-like expansion of the AST.
All linear node instances are expanded into dictionaries."""
D = {'type': getattr(self, 'type', type(self).__name__)}
D.update(vars(self))
# Hide default values
for key in ['lineno', 'comment', 'meta']:
if key in D and not D[key]:
del D[key]
# Rename private attributes
D['data'] = D.pop('_data')
for k in ['priority', 'error']:
if '_' + k in D:
D[k] = value = D.pop('_' + k)
if not value:
del D[k]
memo = dict()
def json(x):
obj_id = id(x)
if obj_id in memo and memo[obj_id] > 5:
if isinstance(x, list):
return Literal('[...]')
elif isinstance(x, (set, dict)):
return Literal('{...}')
if hasattr(type(x), '__contains__'):
memo[obj_id] = memo.get(obj_id, 0) + 1
if isinstance(x, (list, tuple)):
return [json(y) for y in x]
elif isinstance(x, LinearNode):
return x.json()
elif isinstance(x, dict):
return {k: json(v) for (k, v) in x.items()}
else:
return x
return {k: json(v) for (k, v) in D.items()}
def copy(self):
"""Return a deep copy."""
return copy.deepcopy(self)
def setmeta(self, attr, value):
"""Writes an attribute of meta information."""
self.meta[attr] = value
def getmeta(self, attr, *args):
"""Retrieves an attribute of meta information.
Can give a second positional argument with the default value to return
if the attribute does not exist."""
if args:
return self.meta.get(attr, args[0])
try:
return self.meta[attr]
except KeyError:
raise AttributeError('invalid meta attribute: %r' % attr)
def transform_strings(self, func):
"""Transform all visible string values in test case by the given
function *inplace*."""
for case in self:
case.transform_strings(func)
class IoSpec(LinearNode):
"""Root node of an iospec AST"""
type = 'iospec-root'
def __init__(self, data=(), *,
commands=None, make_commands=None, definitions=()):
super().__init__(data)
self.commands = AttrDict(commands or {})
self.make_commands = AttrDict(make_commands or {})
self.definitions = list(definitions)
def source(self):
prefix = '\n\n'.join(block.strip('\n') for block in self.definitions)
return prefix + '\n\n'.join(case.source() for case in self)
def inputs(self):
"""Return a list of input strings."""
return [x.inputs() for x in self]
def expand_inputs(self, size=0):
"""Expand all input command nodes into regular In() atoms.
The changes are done *inplace*.
Parameters
----------
size:
The target size for the total number of test cases. If the tree has
less test cases than size, it will create additional test cases
according to the test case priority.
"""
if size < len(self):
for case in self:
case.expand_inputs()
else:
# Expand to reach len(self) == size
diff = size - len(self)
pairs = [[case.priority, case] for case in self]
total_priority = max(sum(x[0] for x in pairs), 1)
for x in pairs:
x[0] *= diff / total_priority
cases = []
for priority, case in pairs:
cases.append(case)
for _ in range(round(priority)):
cases.append(case.copy())
self[:] = cases
# Expand inputs at this new size
self.expand_inputs()
def fuse_outputs(self):
"""Fuse any consecutive Out() strings together."""
for case in self:
case.fuse_outputs()
def has_errors(self):
"""Return True if the IoSpec data has some error block"""
return any(case.error is not None for case in self)
def get_error(self):
"""Return an exception that describes the first error encountered in
the run."""
for case in self:
if case.error is not None:
return case.error
def to_json(self):
"""Convert object to a json structure."""
return [x.to_json() for x in self]
@classmethod
def from_json(cls, data):
"""Decode JSON representation of IoSpec data."""
return cls([TestCase.from_json(x) for x in data])
class TestCase(LinearNode):
"""Base class for all test cases."""
# noinspection PyArgumentList
def __init__(self, data=(), *, priority=None, lineno=None, error=None, **kwds):
super().__init__(data, **kwds)
self._priority = priority
self.lineno = lineno
self.error = error
@property
def priority(self):
if self._priority is None:
if any(isinstance(atom, Command) for atom in self):
return 1.0
return 0.0
else:
return self._priority
@priority.setter
def priority(self, value):
self._priority = value
@property
def is_error(self):
return False
@property
def error(self):
return self._error
@error.setter
def error(self, value):
if isinstance(value, Exception):
self._error = value
elif isinstance(value, type) and issubclass(value, Exception):
self._error = value()
elif value is None:
self._error = value
else:
raise TypeError('expect exception, got %s' % value)
def inputs(self):
"""Return a list of inputs for the test case."""
raise NotImplementedError
def expand_inputs(self):
"""Expand all computed input nodes *inplace*."""
for idx, atom in enumerate(self):
if isinstance(atom, Command):
self[idx] = atom.expand()
def fuse_outputs(self):
pass
def to_json(self):
return {'type': self.type, 'data': [x.to_json() for x in self]}
@classmethod
def from_json(cls, data):
atoms = [Atom.from_json(x) for x in data['data']]
if data['type'] == 'io':
return IoTestCase(atoms)
else:
raise NotImplementedError
class IoTestCase(TestCase):
"""Regular input/output test case."""
@property
def type(self):
return 'io'
def inputs(self):
return [str(x) for x in self if isinstance(x, In)]
def fuse_outputs(self):
"""Fuse consecutive Out strings together"""
idx = 1
while idx < len(self):
cur = self[idx]
prev = self[idx - 1]
if isinstance(cur, Out) and isinstance(prev, Out):
self[idx - 1] = Out('%s\n%s' % (prev, cur))
del self[idx]
else:
idx += 1
def transform_strings(self, func):
for i, atom in enumerate(self):
if isinstance(atom, InOrOut):
self[i] = atom.transform(func)
class InputTestCase(TestCase):
"""Blocks that contain only input entries in which o outputs should be
computed by third parties.
It is created by the @input and @plain decorators of the IoSpec language.
"""
@property
def type(self):
return 'input'
def __init__(self, data=(), *, inline=True, **kwds):
super().__init__(data, **kwds)
self.inline = inline
def source(self):
if all(isinstance(x, In) for x in self):
prefix = '@plain'
else:
prefix = '@input'
if self.inline:
data = ';'.join(str(x).replace(';', '\\;').rstrip() for x in self)
source = prefix + ' ' + data
elif prefix == '@input':
data = '\n'.join((' %s' % x).rstrip() for x in self)
source = prefix + '\n' + data
else:
data = '\n'.join(' %s' % x.data for x in self)
source = prefix + '\n' + data
return self._with_comment(source)
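    # Example (illustrative): InputTestCase([In('1'), In('2')]).source() renders
    # as "@plain 1;2"; a case that still contains a Command renders with the
    # "@input" prefix instead.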
def inputs(self):
out = []
for x in self:
if isinstance(x, In):
out.append(str(x))
else:
out.append(x.generate())
return out
# noinspection PyMethodParameters,PyDecorator,PyArgumentList
class ErrorTestCase(TestCase):
"""
Error test cases are created using a decorator::
@timeout-error
a regular block of input/output interactions
@runtime-error
a regular block of input/output interactions
@error
a block of messages displayed to stderr
@build-error
a regular block of input/output interactions
@error
a block of messages that should be displayed to stderr
@earlytermination-error
a regular block of input/output interactions
@error
a block of messages that should be displayed to stderr
The need for error blocks is twofold. It may be the case that the desired
behavior of a program is to indeed display an error message. It is also
necessary in order to use the IOSpec format to *describe* how a program
actually ran.
The type attribute of an ErrorTestCase is one of 'error-timeout',
'error-segfault' or 'error-exception'. In all cases, the error block
consists of a data section that has all regular io interactions just like
an io block and
"""
@property
def is_error(self):
return True
@property
def type(self):
return 'error-' + self.error_type
def __init__(self, data=(), *,
error_message='', error_type='exception', **kwds):
super().__init__(data, **kwds)
self.error_message = str(error_message)
self.error_type = str(error_type)
def _factory(tt):
@classmethod
def method(cls, data=(), **kwds):
if not kwds.get('error_type', tt):
raise ValueError('invalid error_type: %r' % tt)
kwds['error_type'] = tt
return cls(data, **kwds)
method.__name__ = tt
method.__doc__ = 'Constructor for %s errors' % tt
return method
build = _factory('build')
runtime = _factory('runtime')
timeout = _factory('timeout')
earlytermination = _factory('earlytermination')
def source(self):
if not self._data and not self.error_message:
return '@%s-error\n # Empty block' % self.error_type
comment, self.comment = self.comment, ''
try:
body = data = super().source()
finally:
self.comment = comment
body = '\n'.join(' ' + line for line in body.splitlines())
if self.error_message:
lines = self.error_message.splitlines()
error_msg = '\n'.join(' ' + line for line in lines)
error_msg = '\n\n @error\n' + error_msg
else:
error_msg = ''
source = '@%s-error\n%s%s' % (self.error_type, body, error_msg)
return self._with_comment(source)
def inputs(self):
return IoTestCase.inputs(self)
def transform_strings(self, func):
super().transform_strings(func)
self.error_message = func(self.error_message)
#
# Attribute dict
#
class AttrDict(dict):
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError
def __setattr__(self, key, value):
self[key] = value
#
# Comment deque
#
class CommentDeque(collections.deque):
__slots__ = ['comment']
def __init__(self, data=(), comment=None):
self.comment = comment
super().__init__(data)
class Literal(str):
"""A string-like object whose repr() is equal to str()"""
def __repr__(self):
return str(self)
#
# Auxiliary functions and normalizers
#
def presentation_normalizer(x):
x.transform_strings(lambda x: x.casefold().replace(' ', '').replace('\t', ''))
return x
def _assert_kwargs(D):
if not _valid_kwargs.issuperset(D):
arg = next(iter(set(D) - _valid_kwargs))
raise TypeError('invalid argument: %s' % arg)
def normalizer(normalize=None, presentation=False):
"""Return a normalizer function that performs all given transformations."""
L = [normalize] if normalize else []
if presentation:
L.append(presentation_normalizer)
L.reverse()
if L:
def func(x):
x = x.copy()
for f in L:
x = f(x)
return x
return func
else:
return lambda x: x
_valid_kwargs = {'presentation'}
def normalize(obj, normalize=None, **kwargs):
"""Normalize input by the given transformations.
If a list or tuple is passed, normalize each value and return a list."""
func = normalizer(normalize, **kwargs)
if isinstance(obj, LinearNode):
return func(obj)
return [func(x) for x in obj]
@generic
def isequal(x: TestCase, y: TestCase, **kwargs):
"""Return True if both objects are equal up to some normalization."""
x, y = normalize([x, y], **kwargs)
if type(x) is not type(y):
return False
return list(x) == list(y)
@isequal.overload
def _(x: ErrorTestCase, y: ErrorTestCase, **kwargs):
x, y = normalize([x, y], **kwargs)
if x.error_type != y.error_type:
return False
if x.error_message != y.error_message:
return False
return isequal[TestCase, TestCase](x, y)
@isequal.overload
def _(x: IoSpec, y: IoSpec, **kwargs):
func = normalizer(**kwargs)
if len(x) != len(y):
return False
    for (xi, yi) in zip(x, y):
if not isequal(xi, yi, normalize=func):
return False
else:
return True
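# Usage sketch (illustrative):
#   a = IoTestCase([In('foo'), Out('FOO ')])
#   b = IoTestCase([In('foo'), Out('foo')])
#   isequal(a, b)                     # False: outputs differ byte-for-byte
#   isequal(a, b, presentation=True)  # True: case and whitespace are folded away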
| [
"[email protected]"
]
| |
822741f77597ab35a14354227f0683e07d34800f | 93e5b82332af9f0d3e203d086e30794fb90a2086 | /ForKids/chapter07/savings.py | eba2bf48250983c5cd7e7acb0912ddde247fe4d0 | []
| no_license | swell1009/ex | cfaae0b5fe917f12416170dce60f7dea8194f368 | 29b274fb51adbdc43af6ebecaec89c97bc58be6f | refs/heads/master | 2020-04-04T10:15:20.578932 | 2018-11-22T06:27:30 | 2018-11-22T06:27:30 | 155,848,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | def savings(pocket_money, paper_route, spending):
    return pocket_money + paper_route - spending
print(savings(10, 10, 5))
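# With the sample call above: 10 + 10 - 5 == 15, so the script prints 15.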
| [
"[email protected]"
]
| |
93759af8f7a6ac24af30c0957f2296cf3c95c118 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/knapsack_20200708152715.py | 6b2ae69fdf511f9fa60dc4defa0e8ddffc9845b1 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | def Knap(a,b,w):
# declare an empty dictionary
newArr = []
for i,j in zip(a,b):
smallArr = []
smallArr.append(i)
smallArr.append(j)
newArr.append(smallArr)
i = 0
# at position 0 is the weight and at position 1 is the value
# goal is to find highest value but not greater than
while i < len(newArr):
Knap([10,20,30],[60,100,120],220) | [
"[email protected]"
]
| |
6c810f9843558ebd9fdbcfb4f069f53ab3020afd | 44b2743ff70ce0631e9714ce78c44720fa63a9ad | /app/productdb/migrations/0012_auto_20160725_2252.py | f0305b66317e1a360acdb83ff2cb9af81c7a6a76 | [
"MIT"
]
| permissive | hoelsner/product-database | 1b1b4db8e968f5bc149605093e4639c48a9ae1ad | c649569fb82bc4b0a5e9ef9615fff8a364ce652f | refs/heads/master | 2023-07-24T21:39:01.870692 | 2023-07-09T17:03:56 | 2023-07-09T17:03:56 | 43,767,455 | 43 | 27 | MIT | 2023-04-16T19:17:25 | 2015-10-06T17:44:50 | Python | UTF-8 | Python | false | false | 628 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-25 20:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productdb', '0011_userprofile_regex_search'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='regex_search',
field=models.BooleanField(default=False, help_text='Use regular expression in any search field (fallback to simple search if no valid regular expression is used)', verbose_name='use regex search'),
),
]
| [
"[email protected]"
]
| |
be3c912ba0ddc4bcff9d69253f8d074868443909 | 640d26baa9322b92ea5d247280668b4ad7475f8d | /robot_assignment_ws/build/kobuki_keyop/catkin_generated/pkg.develspace.context.pc.py | 1b919eef351d66946b71d3613d93749a8c481f04 | []
| no_license | JulianVJacobs/Robotics-Project-2021 | 6baa5a6423a28cc278b84d831f2d8c9f5239da90 | 18a58cee8e2793bd05e5e158c0c998099fc62d5c | refs/heads/main | 2023-06-03T02:47:15.579120 | 2021-06-25T19:56:32 | 2021-06-25T19:56:32 | 374,733,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/julian/robot_assignment_ws/src/kobuki_keyop/include".split(';') if "/home/julian/robot_assignment_ws/src/kobuki_keyop/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;std_srvs;std_msgs;roscpp;ecl_exceptions;ecl_threads;ecl_time;kobuki_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lkobuki".split(';') if "-lkobuki" != "" else []
PROJECT_NAME = "kobuki_keyop"
PROJECT_SPACE_DIR = "/home/julian/robot_assignment_ws/devel"
PROJECT_VERSION = "0.7.6"
| [
"[email protected]"
]
| |
cbe426147ed87586dbfc67eeba8b4e4cbf5046b4 | d2a2546165b3db6295a3f21972dda8ab9aab7846 | /src/vehicles/towerhouse_flat.py | ce5fba774fa35c9f0aba0561dfddf9b98fb69324 | []
| no_license | andythenorth/road-hog | bab12b133dd674f0e6d7ae87498675f8da96b982 | 1800d57d4ce904e7041f24646c393b37903d9466 | refs/heads/main | 2022-09-26T19:57:31.006800 | 2022-09-17T10:09:37 | 2022-09-17T10:09:37 | 214,848,659 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from road_vehicle import FlatHauler, DieselRoadVehicle
consist = FlatHauler(id='towerhouse_flat',
base_numeric_id=650,
name='Towerhouse',
semi_truck_so_redistribute_capacity=True,
vehicle_life=40,
intro_date=1968)
consist.add_unit(type=DieselRoadVehicle,
vehicle_length=2,
semi_truck_shift_offset_jank=2,
always_use_same_spriterow=True)
consist.add_unit(capacity=40,
vehicle_length=7,
cargo_length=4) # some cargo overlap eh?
| [
"[email protected]"
]
| |
a3bc86651ce830bbe7ddc395698eb5b9c2155f34 | 73fa26bff99b5caef6697769b6d53a3630c5afb3 | /portofolio/migrations/0001_initial.py | 5874a261e23cfdde461ad562ea7b4c1980b13252 | []
| no_license | handole/handofolio | 9ecb1a9359717f0b18e1c0f0ca3616cc365d8100 | 6190ed4a5d614d929489a62fb503a3434eec5349 | refs/heads/master | 2020-06-27T21:57:27.398249 | 2017-06-13T21:30:53 | 2017-06-13T21:30:53 | 94,252,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 17:31
from __future__ import unicode_literals
from django.db import migrations, models
import portofolio.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Portofol',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('content', models.TextField()),
('image', models.ImageField(upload_to=portofolio.models.upload_location)),
('slug', models.SlugField(unique=True)),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-timestamp', '-updated'],
},
),
]
| [
"[email protected]"
]
| |
3e284e6af35d735275c78c58377428a8021d456b | 8574094da8e8ad4bd65e1bbe67dd1abd5003d6a9 | /zopen.plone.filerepos/src/zopen/plone/filerepos/utils.py | d06855e83e9d0c2a375e78d2e19d46f23de1bceb | []
| no_license | madfrog2018/everydo-project | 5b948f8b04c04773163eb5193e45604b1fe4a74e | d2ea5b83513cf4191e29ba70a1fc8b1d9950599f | refs/heads/master | 2021-01-17T06:33:26.229787 | 2010-07-15T01:45:19 | 2010-07-15T01:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | #-*- coding:utf-8 -*-
def getMaxAttachmentSize(user_id=''):
return 30 * 1024 * 1024
def getQuota(user_id=''):
return 500 * 1024 * 1024
| [
"[email protected]@4c72a389-b037-68f1-009d-6f17fb46af5f"
]
| [email protected]@4c72a389-b037-68f1-009d-6f17fb46af5f |
7393f5270b5de41f109002d335d487787a844171 | 1af49694004c6fbc31deada5618dae37255ce978 | /ios/build/bots/scripts/xcode_log_parser_test.py | acdfed6f2dd52affa1f27fe269e794982a552963 | [
"BSD-3-Clause"
]
| permissive | sadrulhc/chromium | 59682b173a00269ed036eee5ebfa317ba3a770cc | a4b950c23db47a0fdd63549cccf9ac8acd8e2c41 | refs/heads/master | 2023-02-02T07:59:20.295144 | 2020-12-01T21:32:32 | 2020-12-01T21:32:32 | 317,678,056 | 3 | 0 | BSD-3-Clause | 2020-12-01T21:56:26 | 2020-12-01T21:56:25 | null | UTF-8 | Python | false | false | 20,507 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for xcode_log_parser.py."""
import json
import mock
import os
import unittest
import test_runner
import test_runner_test
import xcode_log_parser
OUTPUT_PATH = '/tmp/attempt_0'
XCRESULT_PATH = '/tmp/attempt_0.xcresult'
XCODE11_DICT = {
'path': '/Users/user1/Xcode.app',
'version': '11.0',
'build': '11M336w',
}
# A sample of json result when executing xcresulttool on .xcresult dir without
# --id. Some unused keys and values were removed.
XCRESULT_ROOT = """
{
"_type" : {
"_name" : "ActionsInvocationRecord"
},
"actions" : {
"_values" : [
{
"actionResult" : {
"_type" : {
"_name" : "ActionResult"
},
"diagnosticsRef" : {
"id" : {
"_value" : "DIAGNOSTICS_REF_ID"
}
},
"logRef" : {
"id" : {
"_value" : "0~6jr1GkZxoWVzWfcUNA5feff3l7g8fPHJ1rqKetCBa3QXhCGY74PnEuRwzktleMTFounMfCdDpSr1hRfhUGIUEQ=="
}
},
"testsRef" : {
"id" : {
"_value" : "0~iRbOkDnmtKVIvHSV2jkeuNcg4RDTUaCLZV7KijyxdCqvhqtp08MKxl0MwjBAPpjmruoI7qNHzBR1RJQAlANNHA=="
}
}
}
}
]
},
"issues" : {
"testFailureSummaries" : {
"_values" : [
{
"documentLocationInCreatingWorkspace" : {
"url" : {
"_value" : "file:\/\/\/..\/..\/ios\/web\/shell\/test\/page_state_egtest.mm#CharacterRangeLen=0&EndingLineNumber=130&StartingLineNumber=130"
}
},
"message" : {
"_value": "Fail. Screenshots: {\\n\\"Failure\\": \\"path.png\\"\\n}"
},
"testCaseName" : {
"_value": "-[PageStateTestCase testZeroContentOffsetAfterLoad]"
}
}
]
}
},
"metrics" : {
"testsCount" : {
"_value" : "2"
},
"testsFailedCount" : {
"_value" : "1"
}
}
}"""
REF_ID = """
{
"actions": {
"_values": [{
"actionResult": {
"testsRef": {
"id": {
"_value": "REF_ID"
}
}
}
}]
}
}"""
# A sample of json result when executing xcresulttool on .xcresult dir with
# "testsRef" as --id input. Some unused keys and values were removed.
TESTS_REF = """
{
"summaries": {
"_values": [{
"testableSummaries": {
"_type": {
"_name": "Array"
},
"_values": [{
"tests": {
"_type": {
"_name": "Array"
},
"_values": [{
"identifier" : {
"_value" : "All tests"
},
"name" : {
"_value" : "All tests"
},
"subtests": {
"_values": [{
"identifier" : {
"_value" : "ios_web_shell_eg2tests_module.xctest"
},
"name" : {
"_value" : "ios_web_shell_eg2tests_module.xctest"
},
"subtests": {
"_values": [{
"identifier" : {
"_value" : "PageStateTestCase"
},
"name" : {
"_value" : "PageStateTestCase"
},
"subtests": {
"_values": [{
"testStatus": {
"_value": "Success"
},
"identifier": {
"_value": "PageStateTestCase/testMethod1"
},
"name": {
"_value": "testMethod1"
}
},
{
"summaryRef": {
"id": {
"_value": "0~7Q_uAuUSJtx9gtHM08psXFm3g_xiTTg5bpdoDO88nMXo_iMwQTXpqlrlMe5AtkYmnZ7Ux5uEgAe83kJBfoIckw=="
}
},
"testStatus": {
"_value": "Failure"
},
"identifier": {
"_value": "PageStateTestCase\/testZeroContentOffsetAfterLoad"
},
"name": {
"_value": "testZeroContentOffsetAfterLoad"
}
},
{
"testStatus": {
"_value": "Success"
},
"identifier": {
"_value": "PageStateTestCase/testMethod2"
},
"name": {
"_value": "testMethod2"
}
}]
}
}]
}
}]
}
}]
}
}]
}
}]
}
}
"""
# A sample of json result when executing xcresulttool on .xcresult dir with
# a single test summaryRef id value as --id input. Some unused keys and values
# were removed.
SINGLE_TEST_SUMMARY_REF = """
{
"_type" : {
"_name" : "ActionTestSummary",
"_supertype" : {
"_name" : "ActionTestSummaryIdentifiableObject",
"_supertype" : {
"_name" : "ActionAbstractTestSummary"
}
}
},
"activitySummaries" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Screenshot_25659115-F3E4-47AE-AA34-551C94333D7E.jpg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_1"
}
}
}
]
},
"title" : {
"_value" : "Start Test at 2020-10-19 14:12:58.111"
}
},
{
"subactivities" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Screenshot_23D95D0E-8B97-4F99-BE3C-A46EDE5999D7.jpg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_2"
}
}
}
]
},
"subactivities" : {
"_values" : [
{
"subactivities" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Crash_3F0A2B1C-7ADA-436E-A54C-D4C39B8411F8.crash"
},
"payloadRef" : {
"id" : {
"_value" : "CRASH_REF_ID_IN_ACTIVITY_SUMMARIES"
}
}
}
]
},
"title" : {
"_value" : "Wait for org.chromium.ios-web-shell-eg2tests to idle"
}
}
]
},
"title" : {
"_value" : "Activate org.chromium.ios-web-shell-eg2tests"
}
}
]
},
"title" : {
"_value" : "Open org.chromium.ios-web-shell-eg2tests"
}
}
]
},
"title" : {
"_value" : "Set Up"
}
},
{
"title" : {
"_value" : "Find the Target Application 'org.chromium.ios-web-shell-eg2tests'"
}
},
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Screenshot_278BA84B-2196-4CCD-9D31-2C07DDDC9DFC.jpg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_3"
}
}
}
]
},
"title" : {
"_value" : "Uncaught Exception at page_state_egtest.mm:131: \\nCannot scroll, the..."
}
},
{
"title" : {
"_value" : "Uncaught Exception: Immediately halt execution of testcase (EarlGreyInternalTestInterruptException)"
}
},
{
"title" : {
"_value" : "Tear Down"
}
}
]
},
"failureSummaries" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "kXCTAttachmentLegacyScreenImageData_1_6CED1FE5-96CA-47EA-9852-6FADED687262.jpeg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_IN_FAILURE_SUMMARIES"
}
}
}
]
},
"fileName" : {
"_value" : "\/..\/..\/ios\/web\/shell\/test\/page_state_egtest.mm"
},
"lineNumber" : {
"_value" : "131"
},
"message" : {
"_value" : "Some logs."
}
},
{
"message" : {
"_value" : "Immediately halt execution of testcase (EarlGreyInternalTestInterruptException)"
}
}
]
},
"identifier" : {
"_value" : "PageStateTestCase\/testZeroContentOffsetAfterLoad"
},
"name" : {
"_value" : "testZeroContentOffsetAfterLoad"
},
"testStatus" : {
"_value" : "Failure"
}
}"""
def _xcresulttool_get_side_effect(xcresult_path, ref_id=None):
"""Side effect for _xcresulttool_get in Xcode11LogParser tested."""
if ref_id is None:
return XCRESULT_ROOT
if ref_id == 'testsRef':
return TESTS_REF
# Other situation in use cases of xcode_log_parser is asking for single test
# summary ref.
return SINGLE_TEST_SUMMARY_REF
class XCode11LogParserTest(test_runner_test.TestCase):
"""Test case to test Xcode11LogParser."""
def setUp(self):
super(XCode11LogParserTest, self).setUp()
self.mock(test_runner, 'get_current_xcode_info', lambda: XCODE11_DICT)
@mock.patch('xcode_util.version', autospec=True)
def testGetParser(self, mock_xcode_version):
mock_xcode_version.return_value = ('12.0', '12A7209')
self.assertEqual(xcode_log_parser.get_parser().__class__.__name__, 'Xcode11LogParser')
mock_xcode_version.return_value = ('11.4', '11E146')
self.assertEqual(xcode_log_parser.get_parser().__class__.__name__, 'Xcode11LogParser')
mock_xcode_version.return_value = ('10.3', '10G8')
self.assertEqual(xcode_log_parser.get_parser().__class__.__name__, 'XcodeLogParser')
@mock.patch('subprocess.check_output', autospec=True)
def testXcresulttoolGetRoot(self, mock_process):
mock_process.return_value = '%JSON%'
xcode_log_parser.Xcode11LogParser()._xcresulttool_get('xcresult_path')
self.assertTrue(
os.path.join(XCODE11_DICT['path'], 'usr', 'bin') in os.environ['PATH'])
self.assertEqual(
['xcresulttool', 'get', '--format', 'json', '--path', 'xcresult_path'],
mock_process.mock_calls[0][1][0])
@mock.patch('subprocess.check_output', autospec=True)
def testXcresulttoolGetRef(self, mock_process):
mock_process.side_effect = [REF_ID, 'JSON']
xcode_log_parser.Xcode11LogParser()._xcresulttool_get('xcresult_path',
'testsRef')
self.assertEqual(
['xcresulttool', 'get', '--format', 'json', '--path', 'xcresult_path'],
mock_process.mock_calls[0][1][0])
self.assertEqual([
'xcresulttool', 'get', '--format', 'json', '--path', 'xcresult_path',
'--id', 'REF_ID'], mock_process.mock_calls[1][1][0])
def testXcresulttoolListFailedTests(self):
failure_message = [
'file:///../../ios/web/shell/test/page_state_egtest.mm#'
'CharacterRangeLen=0&EndingLineNumber=130&StartingLineNumber=130'
] + 'Fail. Screenshots: {\n\"Failure\": \"path.png\"\n}'.splitlines()
expected = {
'PageStateTestCase/testZeroContentOffsetAfterLoad': failure_message
}
self.assertEqual(
expected,
xcode_log_parser.Xcode11LogParser()._list_of_failed_tests(
json.loads(XCRESULT_ROOT)))
@mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
def testXcresulttoolListPassedTests(self, mock_xcresult):
mock_xcresult.side_effect = _xcresulttool_get_side_effect
expected = [
'PageStateTestCase/testMethod1', 'PageStateTestCase/testMethod2'
]
results = {'passed': [], 'failed': {}}
xcode_log_parser.Xcode11LogParser()._get_test_statuses(OUTPUT_PATH, results)
self.assertEqual(expected, results['passed'])
@mock.patch('file_util.zip_and_remove_folder')
@mock.patch('xcode_log_parser.Xcode11LogParser.copy_artifacts')
@mock.patch('xcode_log_parser.Xcode11LogParser.export_diagnostic_data')
@mock.patch('os.path.exists', autospec=True)
@mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
@mock.patch('xcode_log_parser.Xcode11LogParser._list_of_failed_tests')
def testCollectTestTesults(self, mock_get_failed_tests, mock_root,
mock_exist_file, *args):
metrics_json = """
{
"metrics": {
"testsCount": {
"_value": "7"
},
"testsFailedCount": {
"_value": "14"
}
}
}"""
expected_test_results = {
'passed': [
'PageStateTestCase/testMethod1', 'PageStateTestCase/testMethod2'
],
'failed': {
'WebUITestCase/testBackForwardFromWebURL': [
'file://<unknown>#CharacterRangeLen=0',
'Test crashed in <external symbol>'
]
}
}
mock_get_failed_tests.return_value = expected_test_results['failed']
mock_root.side_effect = _xcresulttool_get_side_effect
mock_exist_file.return_value = True
self.assertEqual(
expected_test_results,
xcode_log_parser.Xcode11LogParser().collect_test_results(
OUTPUT_PATH, []))
@mock.patch('file_util.zip_and_remove_folder')
@mock.patch('xcode_log_parser.Xcode11LogParser.copy_artifacts')
@mock.patch('xcode_log_parser.Xcode11LogParser.export_diagnostic_data')
@mock.patch('os.path.exists', autospec=True)
@mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
def testCollectTestsRanZeroTests(self, mock_root, mock_exist_file, *args):
metrics_json = '{"metrics": {}}'
expected_test_results = {
'passed': [],
'failed': {'TESTS_DID_NOT_START': ['0 tests executed!']}}
mock_root.return_value = metrics_json
mock_exist_file.return_value = True
self.assertEqual(
expected_test_results,
xcode_log_parser.Xcode11LogParser().collect_test_results(
OUTPUT_PATH, []))
@mock.patch('os.path.exists', autospec=True)
def testCollectTestsDidNotRun(self, mock_exist_file):
mock_exist_file.return_value = False
expected_test_results = {
'passed': [],
'failed': {
'TESTS_DID_NOT_START': [
'%s with staging data does not exist.' % OUTPUT_PATH
]
}
}
self.assertEqual(
expected_test_results,
xcode_log_parser.Xcode11LogParser().collect_test_results(
OUTPUT_PATH, []))
@mock.patch('os.path.exists', autospec=True)
def testCollectTestsInterruptedRun(self, mock_exist_file):
mock_exist_file.side_effect = [True, False]
expected_test_results = {
'passed': [],
'failed': {
'BUILD_INTERRUPTED': [
'%s with test results does not exist.' % XCRESULT_PATH
]
}
}
self.assertEqual(
expected_test_results,
xcode_log_parser.Xcode11LogParser().collect_test_results(
OUTPUT_PATH, []))
@mock.patch('subprocess.check_output', autospec=True)
@mock.patch('os.path.exists', autospec=True)
@mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
def testCopyScreenshots(self, mock_xcresulttool_get, mock_path_exists,
mock_process):
mock_path_exists.return_value = True
mock_xcresulttool_get.side_effect = _xcresulttool_get_side_effect
xcode_log_parser.Xcode11LogParser().copy_artifacts(OUTPUT_PATH)
mock_process.assert_any_call([
'xcresulttool', 'export', '--type', 'file', '--id',
'SCREENSHOT_REF_ID_IN_FAILURE_SUMMARIES', '--path', XCRESULT_PATH,
'--output-path',
'/tmp/attempt_0_PageStateTestCase_testZeroContentOffsetAfterLoad_2.jpeg'
])
mock_process.assert_any_call([
'xcresulttool', 'export', '--type', 'file', '--id',
'CRASH_REF_ID_IN_ACTIVITY_SUMMARIES', '--path', XCRESULT_PATH,
'--output-path',
'/tmp/attempt_0_PageStateTestCase_testZeroContentOffsetAfterLoad_1'
'.crash'
])
# Ensures screenshots in activitySummaries are not copied.
self.assertEqual(2, mock_process.call_count)
@mock.patch('file_util.zip_and_remove_folder')
@mock.patch('subprocess.check_output', autospec=True)
@mock.patch('os.path.exists', autospec=True)
@mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
def testExportDiagnosticData(self, mock_xcresulttool_get, mock_path_exists,
mock_process, _):
mock_path_exists.return_value = True
mock_xcresulttool_get.side_effect = _xcresulttool_get_side_effect
xcode_log_parser.Xcode11LogParser.export_diagnostic_data(OUTPUT_PATH)
mock_process.assert_called_with([
'xcresulttool', 'export', '--type', 'directory', '--id',
'DIAGNOSTICS_REF_ID', '--path', XCRESULT_PATH, '--output-path',
'/tmp/attempt_0.xcresult_diagnostic'
])
@mock.patch('os.path.exists', autospec=True)
def testCollectTestResults_interruptedTests(self, mock_path_exists):
mock_path_exists.side_effect = [True, False]
output = [
'[09:03:42:INFO] Test case \'-[TestCase1 method1]\' passed on device.',
'[09:06:40:INFO] Test Case \'-[TestCase2 method1]\' passed on device.',
'[09:09:00:INFO] Test case \'-[TestCase2 method1]\' failed on device.',
'** BUILD INTERRUPTED **',
]
not_found_message = ['%s with test results does not exist.' % XCRESULT_PATH]
res = xcode_log_parser.Xcode11LogParser().collect_test_results(
OUTPUT_PATH, output)
self.assertIn('BUILD_INTERRUPTED', res['failed'])
self.assertEqual(not_found_message + output,
res['failed']['BUILD_INTERRUPTED'])
self.assertEqual(['TestCase1/method1', 'TestCase2/method1'],
res['passed'])
@mock.patch('file_util.zip_and_remove_folder')
@mock.patch('xcode_log_parser.Xcode11LogParser.copy_artifacts')
@mock.patch('xcode_log_parser.Xcode11LogParser.export_diagnostic_data')
@mock.patch('os.path.exists', autospec=True)
@mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
@mock.patch('xcode_log_parser.Xcode11LogParser._list_of_failed_tests')
def testArtifactsDiagnosticLogsExportedInCollectTestTesults(
self, mock_get_failed_tests, mock_root, mock_exist_file,
mock_export_diagnostic_data, mock_copy_artifacts, mock_zip):
mock_root.side_effect = _xcresulttool_get_side_effect
mock_exist_file.return_value = True
xcode_log_parser.Xcode11LogParser().collect_test_results(OUTPUT_PATH, [])
mock_export_diagnostic_data.assert_called_with(OUTPUT_PATH)
mock_copy_artifacts.assert_called_with(OUTPUT_PATH)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
e1a323d444a2e947bff196013ec3c3a287d0fd63 | bf73b244a116a4fa01b3a91d11025a0cb29c1374 | /ecomapp/views.py | 0a2759458713f09de0c5fd4afa5ff3e922927189 | []
| no_license | brahim024/django-ecommerce-web-app | f88b3586d04bba59019322638e90b98d11033ba9 | f817eda9ab273b001fedc9a78d0aee3a13aa767c | refs/heads/master | 2023-03-01T12:05:30.259028 | 2021-02-10T12:11:21 | 2021-02-10T12:11:21 | 289,489,478 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | from django.shortcuts import render, get_object_or_404,redirect
from django.http import HttpResponse,HttpResponseRedirect
from .models import Category, Product
from cart.forms import CartAddProductForm
from .forms import CommentForm
from .filters import ProductFilter
# Create your views here.
def product_list(request, category_slug=None):
    category = None
    categories = Category.objects.all()
    product = Product.objects.filter(available=True)
    if category_slug:
        category = get_object_or_404(Category, slug=category_slug)
        product = product.filter(category=category)
    myfilter = ProductFilter(request.GET, queryset=Product.objects.all())
    return render(request, 'list.html',
                  {'category': category,
                   'categories': categories,
                   'products': product, 'myfilter': myfilter})
def product_detail(request, id, slug):
    product = get_object_or_404(Product, id=id, slug=slug, available=True)
    cart_product_form = CartAddProductForm()
    comments = product.comment.filter(active=True)
    new_comment = None
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            new_comment = form.save(commit=False)
            new_comment.product = product
            new_comment.save()
            # return HttpResponseRedirect('stor/product_list_by_category')
    else:
        form = CommentForm()
    context = {'form': form, 'product': product,
               'cart_product_form': cart_product_form,
               'new_comment': new_comment,
               'comments': comments}
    return render(request, 'details.html', context)
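# Illustrative sketch (not part of the original file): these two views are
# normally wired up in the app's urls.py. The path patterns and names below
# are assumptions for illustration, not taken from this project.
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.product_list, name='product_list'),
#         path('<slug:category_slug>/', views.product_list,
#              name='product_list_by_category'),
#         path('<int:id>/<slug:slug>/', views.product_detail,
#              name='product_detail'),
#     ]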
| [
"[email protected]"
]
| |
a5086b6d1c057d5c36fe149da514d848a8e489b7 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/graph_objs/isosurface/hoverlabel/_font.py | 6393e90a67d88d67b0f038eefc4982bc02625fe7 | [
"MIT"
]
| permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 10,983 | py | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Font(BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'isosurface.hoverlabel'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.isosurface.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__('font')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface.hoverlabel import (font as v_font)
# Initialize validators
# ---------------------
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
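# Illustrative note (not part of the generated plotly source): this class is
# usually reached through the figure hierarchy rather than constructed
# directly, e.g. when styling isosurface hover labels:
#
#     import plotly.graph_objs as go
#     trace = go.Isosurface(
#         hoverlabel=dict(font=dict(family='Arial', size=12, color='white')))
#
# plotly validates the nested dict and coerces it into the Font object above.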
| [
"[email protected]"
]
| |
e62bf5693b8310c80b29d8becdb2a5943dfa324f | c113158bf6ce1edeb24298d711fddc56af2d62ee | /heltour/tournament/migrations/0084_gamenomination_pairing.py | c3ea3fca48dc9aab4cd2bf0f78af016d6fa6ae79 | [
"MIT"
]
| permissive | elvisaronsp/heltour | 44d05b6e195b5fd939304ac776167e85762aec83 | f5444552293ee4d51cbc7cf025857ed23d7d03dd | refs/heads/master | 2020-06-19T06:16:54.312479 | 2019-05-26T15:13:33 | 2019-05-26T15:13:33 | 196,594,361 | 1 | 0 | MIT | 2019-07-12T14:34:58 | 2019-07-12T14:34:58 | null | UTF-8 | Python | false | false | 540 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-01 20:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournament', '0083_auto_20160901_1905'),
]
operations = [
migrations.AddField(
model_name='gamenomination',
name='pairing',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tournament.PlayerPairing'),
),
]
| [
"[email protected]"
]
| |
619df83ed8d14e7f8c40459b49bf4ef7e0e7a49a | c6e885e317915496c655dca38d8b7b830f848d64 | /worker.py | eae0a90ea4979b663fcd33ddea68627d83040a2c | []
| no_license | Kelvinson/tensorflow-a3c | 2bac0d9226b49ec604a98e76fdfadd55c402abc9 | 50258c4c5f4abe16d2df4bfd411fb5f3fd24ccbb | refs/heads/master | 2021-08-22T06:02:21.320069 | 2017-11-29T12:43:17 | 2017-11-29T12:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,826 | py | from collections import deque
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import gym
from network import create_network
from train_ops import *
from utils import *
G = 0.99
N_ACTIONS = 3
ACTIONS = np.arange(N_ACTIONS) + 1
N_FRAMES_STACKED = 4
N_MAX_NOOPS = 30
def list_set(l, i, val):
assert(len(l) == i)
l.append(val)
class Worker:
def __init__(self, sess, worker_n, env_name, summary_writer):
self.sess = sess
self.env = EnvWrapper(gym.make(env_name), prepro2=prepro2, frameskip=4)
worker_scope = "worker_%d" % worker_n
self.network = create_network(worker_scope)
self.summary_writer = summary_writer
self.scope = worker_scope
self.reward = tf.Variable(0.0)
self.reward_summary = tf.summary.scalar('reward', self.reward)
policy_optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)
value_optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)
self.update_policy_gradients, self.apply_policy_gradients, self.zero_policy_gradients, self.grad_bufs_policy = \
create_train_ops(self.network.policy_loss,
policy_optimizer,
update_scope=worker_scope,
apply_scope='global')
self.update_value_gradients, self.apply_value_gradients, self.zero_value_gradients, self.grad_bufs_value = \
create_train_ops(self.network.value_loss,
value_optimizer,
update_scope=worker_scope,
apply_scope='global')
self.init_copy_ops()
self.frame_stack = deque(maxlen=N_FRAMES_STACKED)
self.reset_env()
self.t_max = 10000
self.steps = 0
self.episode_rewards = []
self.render = False
self.value_log = deque(maxlen=100)
self.fig = None
def reset_env(self):
self.frame_stack.clear()
self.env.reset()
n_noops = np.random.randint(low=0, high=N_MAX_NOOPS+1)
print("%d no-ops..." % n_noops)
for i in range(n_noops):
o, _, _, _ = self.env.step(0)
self.frame_stack.append(o)
while len(self.frame_stack) < N_FRAMES_STACKED:
print("One more...")
o, _, _, _ = self.env.step(0)
self.frame_stack.append(o)
print("No-ops done")
def log_rewards(self):
reward_sum = sum(self.episode_rewards)
print("Reward sum was", reward_sum)
self.sess.run(tf.assign(self.reward, reward_sum))
summ = self.sess.run(self.reward_summary)
self.summary_writer.add_summary(summ, self.steps)
def init_copy_ops(self):
from_tvs = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='global')
to_tvs = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=self.scope)
from_dict = {var.name: var for var in from_tvs}
to_dict = {var.name: var for var in to_tvs}
copy_ops = []
for to_name, to_var in to_dict.items():
from_name = to_name.replace(self.scope, 'global')
from_var = from_dict[from_name]
op = to_var.assign(from_var.value())
copy_ops.append(op)
self.copy_ops = copy_ops
def sync_network(self):
self.sess.run(self.copy_ops)
def value_graph(self):
if self.fig is None:
self.fig, self.ax = plt.subplots()
self.fig.set_size_inches(2, 2)
self.ax.set_xlim([0, 100])
self.ax.set_ylim([0, 2.0])
self.line, = self.ax.plot([], [])
self.fig.show()
self.fig.canvas.draw()
self.bg = self.fig.canvas.copy_from_bbox(self.ax.bbox)
self.fig.canvas.restore_region(self.bg)
ydata = list(self.value_log)
xdata = list(range(len(self.value_log)))
self.line.set_data(xdata, ydata)
self.ax.draw_artist(self.line)
self.fig.canvas.update()
self.fig.canvas.flush_events()
def run_step(self):
states = []
actions = []
rewards = []
i = 0
self.sess.run([self.zero_policy_gradients,
self.zero_value_gradients])
self.sync_network()
        # Snapshot the current frame stack; the deque mutates in place as new
        # observations are appended, so a copy keeps states[0] valid, matching
        # the np.copy() used for every later entry below.
        list_set(states, i, np.copy(self.frame_stack))
done = False
while not done and i < self.t_max:
#print("Step %d" % i)
s = np.moveaxis(self.frame_stack, source=0, destination=-1)
feed_dict = {self.network.s: [s]}
a_p = self.sess.run(self.network.a_softmax, feed_dict=feed_dict)[0]
a = np.random.choice(ACTIONS, p=a_p)
list_set(actions, i, a)
o, r, done, _ = self.env.step(a)
if self.render:
self.env.render()
feed_dict = {self.network.s: [s]}
v = self.sess.run(self.network.graph_v, feed_dict=feed_dict)[0]
self.value_log.append(v)
self.value_graph()
if r != 0:
print("Got reward", r)
self.frame_stack.append(o)
self.episode_rewards.append(r)
list_set(rewards, i, r)
list_set(states, i + 1, np.copy(self.frame_stack))
i += 1
if done:
print("Episode done")
self.log_rewards()
self.episode_rewards = []
# Calculate initial value for R
if done:
# Terminal state
r = 0
else:
# Non-terminal state
# Estimate the value of the current state using the value network
# (states[i]: the last state)
s = np.moveaxis(states[i], source=0, destination=-1)
feed_dict = {self.network.s: [s]}
r = self.sess.run(self.network.graph_v, feed_dict=feed_dict)[0]
# i - 1 to 0
# (Why start from i - 1, rather than i?
# So that we miss out the last state.)
for j in reversed(range(i)):
s = np.moveaxis(states[j], source=0, destination=-1)
r = rewards[j] + G * r
feed_dict = {self.network.s: [s],
# map from possible actions (1, 2, 3) -> (0, 1, 2)
self.network.a: [actions[j] - 1],
self.network.r: [r]}
self.sess.run([self.update_policy_gradients,
self.update_value_gradients],
feed_dict)
self.sess.run([self.apply_policy_gradients,
self.apply_value_gradients])
self.sess.run([self.zero_policy_gradients,
self.zero_value_gradients])
self.steps += 1
return done
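# Illustrative sketch (not part of the original training code): the backward
# pass in run_step() computes n-step discounted returns. The helper below
# reproduces that calculation in isolation, bootstrapping from an assumed
# value estimate for the state after the last step, exactly as run_step()
# does for non-terminal rollouts.
def _discounted_returns_example(rewards, bootstrap_value, gamma=G):
    # Walk the rewards backwards, accumulating r_j + gamma * R.
    returns = []
    r = bootstrap_value
    for reward in reversed(rewards):
        r = reward + gamma * r
        returns.append(r)
    returns.reverse()
    return returns
if __name__ == '__main__':
    # e.g. rewards [0, 0, 1] with an assumed bootstrap value of 0.5
    print(_discounted_returns_example([0.0, 0.0, 1.0], 0.5))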
| [
"[email protected]"
]
| |
e707dff5bcaa52d36f0d56604c629444190857e3 | a88a99fb3f754649db06ad86d22b5cb0d2d1e19c | /scholariumat/studies/admin.py | a7b9bd11bef2d7c203c61c32189fdf63a81e8547 | [
"MIT"
]
| permissive | valuehack/scholariumat | 91ec59647948759d917ce7077d06b0aa9618c807 | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | refs/heads/master | 2022-12-07T22:20:23.967854 | 2020-04-09T22:05:52 | 2020-04-09T22:05:52 | 135,466,121 | 0 | 3 | MIT | 2022-12-06T18:38:22 | 2018-05-30T15:55:14 | JavaScript | UTF-8 | Python | false | false | 165 | py | from django.contrib import admin
from products.admin import ProductBaseAdmin
from .models import StudyProduct
admin.site.register(StudyProduct, ProductBaseAdmin)
| [
"[email protected]"
]
| |
f66ef7744bdf8ef89f523383bc34e9bfdcf7a44e | 0edd5346cac98bb2cf288560310807899e7f334a | /Lesson6b.py | 3584a86e8f806ff40329a7a0348621b1b6b99c2f | []
| no_license | modcomlearning/Python5thJuly | 741aae36e7943656e7036957eecefaa7fca11251 | 9dcf7bb46dceb526f8be8fff12faf1452741a9c8 | refs/heads/master | 2023-07-08T02:23:55.657170 | 2021-08-11T17:45:09 | 2021-08-11T17:45:09 | 387,753,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | f = open('modcom.txt','r+')
print(f.readlines()) | [
"[email protected]"
]
| |
f64efc42f03f469457ffaf5dd385b81fe2ed4704 | 0c5dd1b89b686d23b536c9f51b30b2f5e69ff399 | /edit_form.py | adc3ab8524f18d52cfe4e708bf3488e3bbda4a5a | []
| no_license | blazprog/codesnips | 23fc57233fc197866c8c539df280d8792de098a4 | 1c307b74b5a00cbe339c86b3e37b101ad0921fcb | refs/heads/master | 2021-01-21T10:45:48.507153 | 2018-11-19T11:32:58 | 2018-11-19T11:32:58 | 101,984,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | # -*- coding: utf-8 -*-
import sys
import PyQt5.QtWidgets as qtw
# from PyQt5.QtWidgets import QWidget, QApplication, QDialog, QMdiSubWindow
from PyQt5.QtSql import *
# Grab the default connection; this assumes QSqlDatabase.addDatabase(...) has
# already been called elsewhere (e.g. by the application entry point), since
# no connection is configured in this module itself.
conn = QSqlDatabase.database()
class WordBrowse(QSqlTableModel):
def __init__(self, parent=None):
super().__init__(parent)
self.setTable('ozegov')
self.select()
class EditForm(qtw.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.initUI()
def initUI(self):
self.setWindowTitle('New Edit Form')
lblWord = qtw.QLabel('Word')
lbl = qtw.QLabel("My Label")
self.txtWord = qtw.QLineEdit()
lblDescription = qtw.QLabel('Description')
self.txtDescription = qtw.QTextEdit()
self.main_layout = qtw.QVBoxLayout()
#self.main_layout = qtv.QLabel()
self.main_layout.addWidget(lblWord)
self.main_layout.addWidget(lbl)
self.main_layout.addWidget(self.txtWord)
self.main_layout.addWidget(lblDescription)
self.main_layout.addWidget(self.txtDescription)
self.setLayout(self.main_layout)
self.model = WordBrowse()
self.mapper = qtw.QDataWidgetMapper(self)
self.mapper.setSubmitPolicy(qtw.QDataWidgetMapper.ManualSubmit)
self.mapper.setModel(self.model)
self.mapper.addMapping(self.txtWord, 0)
self.mapper.addMapping(self.txtDescription, 1)
self.mapper.toFirst()
self.show()
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
ef = EditForm()
ef.show()
sys.exit(app.exec_())
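# Illustrative sketch (not part of the original form): record navigation could
# be added by wiring buttons to the mapper after it is created in initUI(),
# for example:
#
#     self.btnNext = qtw.QPushButton('Next')
#     self.btnNext.clicked.connect(self.mapper.toNext)
#     self.main_layout.addWidget(self.btnNext)
#
# QDataWidgetMapper also provides toPrevious(), toFirst() and toLast() for
# stepping through the rows of the underlying WordBrowse model.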
| [
"[email protected]"
]
| |
c0fb1b2ae995c53cd776a5dd076d79a17da945dc | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/189060_Using_Berkeley_DB_Database/recipe-189060.py | c21520218466957b07c688f8c45e5007591b0523 | [
"Python-2.0",
"MIT"
]
| permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/python
from bsddb3 import db # the Berkeley db data base
# Part 1: Create database and insert 4 elements
#
filename = 'fruit'
# Get an instance of BerkeleyDB
fruitDB = db.DB()
# Create a database in file "fruit" with a Hash access method
# There are also, B+tree and Recno access methods
fruitDB.open(filename, None, db.DB_HASH, db.DB_CREATE)
# Print version information
print '\t', db.DB_VERSION_STRING
# Insert new elements in database
fruitDB.put("apple","red")
fruitDB.put("orange","orange")
fruitDB.put("banana","yellow")
fruitDB.put("tomato","red")
# Close database
fruitDB.close()
# Part 2: Open database and write its contents out
#
fruitDB = db.DB()
# Open database
# Access method: Hash
# set isolation level to "dirty read (read uncommited)"
fruitDB.open(filename, None, db.DB_HASH, db.DB_DIRTY_READ)
# get database cursor and print out database content
cursor = fruitDB.cursor()
rec = cursor.first()
while rec:
print rec
rec = cursor.next()
fruitDB.close()
| [
"[email protected]"
]
| |
1531ec9143a6b9a180d6b727f62af76a9f8dd0c3 | 33d7d66b287f61e280ba18a6d24de3d7f437665e | /src/hp3par_exporter/prometheus_metrics.py | a83689554a68b37871cffe19ec742516b020e050 | []
| no_license | ycyr/hp3par-exporter | 13578a1ff61699ff91c52ddfd3ec8d3f506af8c3 | fef3dd8cdce96a0327583d03943a72cbb40c86b5 | refs/heads/master | 2020-09-15T11:53:58.833163 | 2019-08-29T13:19:38 | 2019-08-29T13:54:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from prometheus_client import Gauge
from prometheus_client import REGISTRY
registry = REGISTRY
gauge_hp3par_total_capacity_mib = Gauge('hp3par_totalCapacityMiB', 'Total system capacity in MiB', ["id", "hp3par_name"])
gauge_hp3par_allocated_capacity_mib = Gauge('hp3par_allocatedCapacityMiB',
                                            'Total allocated capacity in MiB', ["id", "hp3par_name"])
gauge_hp3par_free_capacity_mib = Gauge('hp3par_freeCapacityMiB',
'Total free capacity in MiB', ["id", "hp3par_name"])
gauge_hp3par_failed_capacity_mib = Gauge('hp3par_failedCapacityMiB',
'Total failed capacity in MiB', ["id", "hp3par_name"])
| [
"[email protected]"
]
| |
4f7eac54dc31a8805d603dd86392408a9df4a2b8 | d42dd7e79a7ec4604005867456a21eeee78c4ece | /venv/lib/python3.7/operator.py | ce890d762c95d907d890c90f7b7a4eac109e46c3 | []
| no_license | balakrishnans0214/Dashdropdown | c5d9ebae8f0a2cd04aaf4215bee656bd65982d6f | 9df8e73812743df21f28a18ffcea725f1c6dc935 | refs/heads/master | 2020-07-13T21:00:28.791997 | 2019-12-11T11:52:01 | 2019-12-11T11:52:01 | 205,154,134 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | /home/client/anaconda3/lib/python3.7/operator.py | [
"[email protected]"
]
| |
6540536a247411364d8ade39bb77998a6171cff0 | 926621c29eb55046f9f59750db09bdb24ed3078e | /lib/googlecloudsdk/core/util/retry.py | 48f365c6786b46490eb76b1628fd11ba78d981e5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | bopopescu/SDK | 525d9b29fb2e901aa79697c9dcdf5ddd852859ab | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | refs/heads/master | 2022-11-22T18:24:13.464605 | 2016-05-18T16:53:30 | 2016-05-18T16:53:30 | 282,322,505 | 0 | 0 | NOASSERTION | 2020-07-24T21:52:25 | 2020-07-24T21:52:24 | null | UTF-8 | Python | false | false | 10,534 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of retrying logic."""
import functools
import itertools
import random
import sys
import time
from googlecloudsdk.third_party.py27 import py27_collections as collections
_DEFAULT_JITTER_MS = 1000
# TODO(user): replace retry logic elsewhere
# (appengine/lib, compute, bigquery ...) with this implementation.
class RetryerState(object):
"""Object that holds the state of the retryer."""
def __init__(self, retrial, time_passed_ms, time_to_wait_ms):
"""Initializer for RetryerState.
Args:
retrial: int, the retry attempt we are currently at.
time_passed_ms: int, number of ms that passed since we started retryer.
time_to_wait_ms: int, number of ms to wait for the until next trial.
If this number is -1, it means the iterable item that specifies the
next sleep value has raised StopIteration.
"""
self.retrial = retrial
self.time_passed_ms = time_passed_ms
self.time_to_wait_ms = time_to_wait_ms
class RetryException(Exception):
"""Raised to stop retrials on failure."""
def __init__(self, message, last_result, state):
self.message = message
self.last_result = last_result
self.state = state
super(RetryException, self).__init__(message)
def __str__(self):
return ('last_result={last_result}, last_retrial={last_retrial}, '
'time_passed_ms={time_passed_ms},'
'time_to_wait={time_to_wait_ms}'.format(
last_result=self.last_result,
last_retrial=self.state.retrial,
time_passed_ms=self.state.time_passed_ms,
time_to_wait_ms=self.state.time_to_wait_ms))
class WaitException(RetryException):
"""Raised when timeout was reached."""
class MaxRetrialsException(RetryException):
"""Raised when too many retrials reached."""
class Retryer(object):
"""Retries a function based on specified retry strategy."""
def __init__(self, max_retrials=None, max_wait_ms=None,
exponential_sleep_multiplier=None, jitter_ms=_DEFAULT_JITTER_MS,
status_update_func=None, wait_ceiling_ms=None):
"""Initializer for Retryer.
Args:
max_retrials: int, max number of retrials before raising RetryException.
      max_wait_ms: int, number of ms to wait before raising WaitException.
exponential_sleep_multiplier: float, The exponential factor to use on
subsequent retries.
jitter_ms: int, random [0, jitter_ms] additional value to wait.
status_update_func: func(result, state) called right after each trial.
wait_ceiling_ms: int, maximum wait time between retries, regardless of
modifiers added like exponential multiplier or jitter.
"""
self._max_retrials = max_retrials
self._max_wait_ms = max_wait_ms
self._exponential_sleep_multiplier = exponential_sleep_multiplier
self._jitter_ms = jitter_ms
self._status_update_func = status_update_func
self._wait_ceiling_ms = wait_ceiling_ms
def _RaiseIfStop(self, result, state):
if self._max_wait_ms is not None:
if state.time_passed_ms + state.time_to_wait_ms > self._max_wait_ms:
raise WaitException('Timeout', result, state)
if self._max_retrials is not None and self._max_retrials <= state.retrial:
raise MaxRetrialsException('Reached', result, state)
def _GetTimeToWait(self, last_retrial, sleep_ms):
"""Get time to wait after applying modifyers.
Apply the exponential sleep multiplyer, jitter and ceiling limiting to the
base sleep time.
Args:
      last_retrial: int, the retry attempt that was just tried (0 for the first try).
      sleep_ms: int, base number of ms to wait between trials.
Returns:
int, ms to wait before trying next attempt with all waiting logic applied.
"""
wait_time_ms = sleep_ms
if wait_time_ms:
if self._exponential_sleep_multiplier:
wait_time_ms *= self._exponential_sleep_multiplier ** last_retrial
if self._jitter_ms:
wait_time_ms += random.random() * self._jitter_ms
if self._wait_ceiling_ms:
wait_time_ms = min(wait_time_ms, self._wait_ceiling_ms)
return wait_time_ms
return 0
def RetryOnException(self, func, args=None, kwargs=None,
should_retry_if=None, sleep_ms=None):
"""Retries the function if an exception occurs.
Args:
func: The function to call and retry.
args: a sequence of positional arguments to be passed to func.
kwargs: a dictionary of positional arguments to be passed to func.
should_retry_if: func(exc_type, exc_value, exc_traceback, state) that
returns True or False.
sleep_ms: int or iterable for how long to wait between trials.
Returns:
Whatever the function returns.
Raises:
RetryException, WaitException: if function is retries too many times,
or time limit is reached.
"""
args = args if args is not None else ()
kwargs = kwargs if kwargs is not None else {}
def TryFunc():
try:
return func(*args, **kwargs), None
except: # pylint: disable=bare-except
return None, sys.exc_info()
if should_retry_if is None:
should_retry = lambda x, s: x[1] is not None
else:
def ShouldRetryFunc(try_func_result, state):
exc_info = try_func_result[1]
if exc_info is None:
# No exception, no reason to retry.
return False
return should_retry_if(exc_info[0], exc_info[1], exc_info[2], state)
should_retry = ShouldRetryFunc
result, exc_info = self.RetryOnResult(
TryFunc, should_retry_if=should_retry, sleep_ms=sleep_ms)
if exc_info:
# Exception that was not retried was raised. Re-raise.
raise exc_info[0], exc_info[1], exc_info[2]
return result
def RetryOnResult(self, func, args=None, kwargs=None,
should_retry_if=None, sleep_ms=None):
"""Retries the function if the given condition is satisfied.
Args:
func: The function to call and retry.
args: a sequence of arguments to be passed to func.
kwargs: a dictionary of positional arguments to be passed to func.
should_retry_if: result to retry on or func(result, RetryerState) that
returns True or False if we should retry or not.
sleep_ms: int or iterable, for how long to wait between trials.
Returns:
Whatever the function returns.
Raises:
MaxRetrialsException: function retried too many times.
WaitException: time limit is reached.
"""
args = args if args is not None else ()
kwargs = kwargs if kwargs is not None else {}
start_time_ms = int(time.time() * 1000)
retrial = 0
if callable(should_retry_if):
should_retry = should_retry_if
else:
should_retry = lambda x, s: x == should_retry_if
if isinstance(sleep_ms, collections.Iterable):
sleep_gen = iter(sleep_ms)
else:
sleep_gen = itertools.repeat(sleep_ms)
while True:
result = func(*args, **kwargs)
time_passed_ms = int(time.time() * 1000) - start_time_ms
try:
sleep_from_gen = sleep_gen.next()
except StopIteration:
time_to_wait_ms = -1
else:
time_to_wait_ms = self._GetTimeToWait(retrial, sleep_from_gen)
state = RetryerState(retrial, time_passed_ms, time_to_wait_ms)
if not should_retry(result, state):
return result
if time_to_wait_ms == -1:
raise MaxRetrialsException('Sleep iteration stop', result, state)
if self._status_update_func:
self._status_update_func(result, state)
self._RaiseIfStop(result, state)
time.sleep(time_to_wait_ms / 1000.0)
retrial += 1
def RetryOnException(f=None, max_retrials=None, max_wait_ms=None,
sleep_ms=None, exponential_sleep_multiplier=None,
jitter_ms=_DEFAULT_JITTER_MS,
status_update_func=None,
should_retry_if=None):
"""A decorator to retry on exceptions.
Args:
f: a function to run possibly multiple times
max_retrials: int, max number of retrials before raising RetryException.
    max_wait_ms: int, number of ms to wait before raising WaitException.
sleep_ms: int or iterable, for how long to wait between trials.
exponential_sleep_multiplier: float, The exponential factor to use on
subsequent retries.
jitter_ms: int, random [0, jitter_ms] additional value to wait.
    status_update_func: func(result, state) called right after each trial.
should_retry_if: func(exc_type, exc_value, exc_traceback, state) that
returns True or False.
Returns:
A version of f that is executed potentially multiple times and that
yields the first returned value or the last exception raised.
"""
if f is None:
    # Return a decorator: RetryOnException with max_retrials, max_wait_ms,
    # sleep_ms, etc. fixed via functools.partial.
return functools.partial(
RetryOnException,
exponential_sleep_multiplier=exponential_sleep_multiplier,
jitter_ms=jitter_ms,
max_retrials=max_retrials,
max_wait_ms=max_wait_ms,
should_retry_if=should_retry_if,
sleep_ms=sleep_ms,
status_update_func=status_update_func)
@functools.wraps(f)
def DecoratedFunction(*args, **kwargs):
retryer = Retryer(
max_retrials=max_retrials,
max_wait_ms=max_wait_ms,
exponential_sleep_multiplier=exponential_sleep_multiplier,
jitter_ms=jitter_ms,
status_update_func=status_update_func)
try:
return retryer.RetryOnException(f, args=args, kwargs=kwargs,
should_retry_if=should_retry_if,
sleep_ms=sleep_ms)
except MaxRetrialsException as mre:
to_reraise = mre.last_result[1]
raise to_reraise[0], to_reraise[1], to_reraise[2]
return DecoratedFunction
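# Illustrative usage sketch (not part of the googlecloudsdk source). The
# decorator form retries a callable on exceptions; the Retryer form retries
# on a result predicate. The timings below are arbitrary example values.
if __name__ == '__main__':
  @RetryOnException(max_retrials=3, sleep_ms=100)
  def _Flaky():
    raise ValueError('transient failure')
  # Retry until the result differs from 0, sleeping 50ms (plus jitter)
  # between trials; here the lambda succeeds on the first call.
  Retryer(max_retrials=5).RetryOnResult(lambda: 1, should_retry_if=0,
                                        sleep_ms=50)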
| [
"[email protected]"
]
| |
2a30fcfc232100c738167d147999db51ad75955c | a26923adfffd7cab44d15f76156476ccc4f11b70 | /src/GimelStudio/node_graph/node_graph.py | 30a537564c35af3aca4078c0772b18da0fba0c5c | [
"Apache-2.0",
"MIT"
]
| permissive | MarioPeper/Gimel-Studio | 89c81aeb133390f0c8de9286ca5aa92d9a5af9ff | 7eee20adfdb701034017131ebbd706e8f8166b81 | refs/heads/master | 2022-12-11T14:51:50.127304 | 2020-09-02T21:59:16 | 2020-09-02T21:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,358 | py | ## ----------------------------------------------------------------------------
## Gimel Studio Copyright 2020 Noah Rahm, Correct Syntax. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## FILE: node_graph.py
## AUTHOR(S): Noah Rahm
## PURPOSE: Define the node graph panel
## ----------------------------------------------------------------------------
# =============================================================================
# File Contents
# =============================================================================
# Due to the size of this file, here is a table of contents of how things
# should be ordered in this file to keep things tidy:
# -----------------------------------------------------------------------------
# 0. Imports & IDs
# 1. Init method(s)
# 2. Methods dealing with drawing the NodeGraph, coord utils, etc.
# 3. Event handler methods starting with "On". (e.g: OnLeftDown)
# 4. Value methods starting with "Get" or "Set" (e.g: GetParent)
# 5. Other
# -----------------------------------------------------------------------------
import math
import wx
import wx.adv
from GimelStudio.utils import DrawGrid
from GimelStudio.node import Node, Wire
# Create IDs
ID_SELECTION_BBOX = wx.NewIdRef()
# Max number of nodes that can be added to the menu is 100, currently
CONTEXTMENU_ADDNODE_IDS = wx.NewIdRef(100)
ID_CONTEXTMENU_DELETENODE = wx.NewIdRef()
ID_CONTEXTMENU_DELETENODES = wx.NewIdRef()
ID_CONTEXTMENU_ENABLEDISABLENODE = wx.NewIdRef()
ID_CONTEXTMENU_DUPLICATENODE = wx.NewIdRef()
ID_CONTEXTMENU_DESELECTALLNODES = wx.NewIdRef()
ID_CONTEXTMENU_SELECTALLNODES = wx.NewIdRef()
ID_CONTEXTMENU_TOGGLENODEPREVIEWS = wx.NewIdRef()
class NodeGraph(wx.ScrolledCanvas):
def __init__(self, parent, size=wx.DefaultSize):
wx.ScrolledCanvas.__init__(self, parent, size=size)
self._parent = parent
# Set Node Graph to 10000x10000 pixels max
self._maxWidth = 10000
self._maxHeight = 10000
self._nodes = {}
self._selectedNodes = []
self._activeNode = None
self._srcNode = None
self._srcPlug = None
self._tmpWire = None
self._bboxRect = None
self._bboxStart = None
self._middlePnt = None
self._nodePreviewToggled = False
self._nodeMenuItemIdMapping = {}
self._pdc = wx.adv.PseudoDC()
# Handle scrolling
self.SetScrollbars(1, 1, self._maxWidth, self._maxHeight, 5000, 5000)
# Nodegraph Bindings
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMiddleDown)
self.Bind(wx.EVT_MIDDLE_UP, self.OnMiddleUp)
# Context menu bindings
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
self._parent.Bind(
wx.EVT_MENU,
self.OnDeleteNode,
id=ID_CONTEXTMENU_DELETENODE
)
self._parent.Bind(
wx.EVT_MENU,
self.OnDeleteNodes,
id=ID_CONTEXTMENU_DELETENODES
)
self._parent.Bind(
wx.EVT_MENU,
self.OnEnableDisableNode,
id=ID_CONTEXTMENU_ENABLEDISABLENODE
)
self._parent.Bind(
wx.EVT_MENU,
self.OnSelectAllNodes,
id=ID_CONTEXTMENU_SELECTALLNODES
)
self._parent.Bind(
wx.EVT_MENU,
self.OnDeselectAllNodes,
id=ID_CONTEXTMENU_DESELECTALLNODES
)
self._parent.Bind(
wx.EVT_MENU,
self.OnDuplicateNode,
id=ID_CONTEXTMENU_DUPLICATENODE
)
self._parent.Bind(
wx.EVT_MENU,
self.OnToggleNodePreviews,
id=ID_CONTEXTMENU_TOGGLENODEPREVIEWS
)
# Keyboard shortcut bindings
self.accel_tbl = wx.AcceleratorTable([(wx.ACCEL_ALT, ord('M'),
ID_CONTEXTMENU_ENABLEDISABLENODE),
(wx.ACCEL_ALT, ord('X'),
ID_CONTEXTMENU_DELETENODE),
(wx.ACCEL_SHIFT, ord('X'),
ID_CONTEXTMENU_DELETENODES),
(wx.ACCEL_SHIFT, ord('D'),
ID_CONTEXTMENU_DUPLICATENODE),
(wx.ACCEL_SHIFT, ord('P'),
ID_CONTEXTMENU_TOGGLENODEPREVIEWS)
])
self._parent.SetAcceleratorTable(self.accel_tbl)
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
dc = wx.GCDC(dc)
dc.SetBackground(wx.Brush(wx.Colour(self.Theme["node_graph_bg"])))
dc.Clear()
rect = self.GetViewableWindowRegion()
self.DoPrepareDC(dc)
# Draw the grid background if the active UI theme allows it
if self.Theme["node_graph_grid"] == "true":
self._DrawGridBackground(dc, rect)
self._pdc.DrawToDCClipped(dc, rect)
def _DrawGridBackground(self, dc, rect):
dc.SetBrush(wx.Brush(wx.Colour('#373737'), wx.CROSS_HATCH))
dc.DrawRectangle(rect)
def ConvertCoords(self, pnt):
""" Convert coords to account for scrolling.
:param pnt: the given wx.Point coord to convert
:returns: wx.Point
"""
xv, yv = self.GetViewStart()
xd, yd = self.GetScrollPixelsPerUnit()
return wx.Point(pnt[0] + (xv * xd), pnt[1] + (yv * yd))
def GetViewableWindowRegion(self):
""" Get the shown scrolled region of the window based on
the current scrolling.
:returns: wx.Rect
"""
xv, yv = self.GetViewStart()
xd, yd = self.GetScrollPixelsPerUnit()
x, y = (xv * xd, yv * yd)
rgn = self.GetUpdateRegion()
rgn.Offset(x, y)
return rgn.GetBox()
def RefreshGraph(self):
""" Refreshes the nodegraph so that everything is redrawn.
Use after ``.Draw()`` calls:
node.Draw(self._pdc)
self.RefreshGraph()
"""
rect = wx.Rect(0, 0, self._maxWidth, self._maxHeight)
self.RefreshRect(rect, False)
self.Refresh()
def OnSelectMenuItem(self, event):
""" Event when an "Add Node" menu item is selected, which adds the
node to the Node Graph.
"""
self.AddNode(self._nodeMenuItemIdMapping[event.GetId()], where="CURSOR")
def OnContextMenu(self, event):
""" Event to create Node Graph context menu on left click. """
# Context menu
contextmenu = wx.Menu()
# Add node submenu
addnodemenu = wx.Menu()
# Add submenus
inputnodemenu = wx.Menu()
drawnodemenu = wx.Menu()
distortnodemenu = wx.Menu()
valuenodemenu = wx.Menu()
filternodemenu = wx.Menu()
blendnodemenu = wx.Menu()
colornodemenu = wx.Menu()
convertnodemenu = wx.Menu()
othernodemenu = wx.Menu()
# List nodes in menu
nodes = self.GetNodeRegistry().GetAvailableNodes()
i = 0
for node_name in nodes:
node_obj = nodes[node_name]()
node_category = node_obj.NodeCategory
node_label = node_obj.NodeLabel
if node_category == "INPUT":
inputnodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
elif node_category == "DRAW":
drawnodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
elif node_category == "DISTORT":
distortnodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
elif node_category == "VALUE":
valuenodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
elif node_category == "FILTER":
filternodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
elif node_category == "BLEND":
blendnodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
elif node_category == "COLOR":
colornodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
elif node_category == "CONVERT":
convertnodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
else:
othernodemenu.Append(CONTEXTMENU_ADDNODE_IDS[i], node_label)
self._nodeMenuItemIdMapping[CONTEXTMENU_ADDNODE_IDS[i]] = node_name
self.Bind(
wx.EVT_MENU,
self.OnSelectMenuItem,
id=CONTEXTMENU_ADDNODE_IDS[i]
)
i += 1
addnodemenu.AppendSubMenu(inputnodemenu, "Input")
addnodemenu.AppendSubMenu(filternodemenu, "Filter")
addnodemenu.AppendSubMenu(colornodemenu, "Color")
addnodemenu.AppendSubMenu(distortnodemenu, "Distort")
addnodemenu.AppendSubMenu(blendnodemenu, "Blend")
addnodemenu.AppendSubMenu(drawnodemenu, "Draw")
addnodemenu.AppendSubMenu(convertnodemenu, "Convert")
addnodemenu.AppendSubMenu(valuenodemenu, "Value")
addnodemenu.AppendSubMenu(othernodemenu, "Other")
contextmenu.Append(wx.ID_ANY, "Add Node", addnodemenu)
# If there is an active node, then we know
# that there shouldn't be any other nodes
# selected, thus we handle the active node first.
if self._activeNode != None:
# Do not allow the output node to be
# deleted, duplicated or disabled at all.
if self._activeNode.IsCompositeOutput() != True:
contextmenu.Append(
ID_CONTEXTMENU_DUPLICATENODE, "Duplicate\tShift+D"
)
contextmenu.Append(
ID_CONTEXTMENU_DELETENODE, "Delete\tAlt+X"
)
if self._activeNode.IsDisabled() == True:
contextmenu.Append(
ID_CONTEXTMENU_ENABLEDISABLENODE, "Toggle Mute\tAlt+M"
)
else:
contextmenu.Append(
ID_CONTEXTMENU_ENABLEDISABLENODE, "Toggle Mute\tAlt+M"
)
else:
if self._selectedNodes != []:
contextmenu.Append(
ID_CONTEXTMENU_DELETENODES, "Delete Selected\tShift+X"
)
contextmenu.Append(
ID_CONTEXTMENU_SELECTALLNODES,
"Select All"
)
contextmenu.Append(
ID_CONTEXTMENU_DESELECTALLNODES,
"Deselect All"
)
contextmenu.Append(
ID_CONTEXTMENU_TOGGLENODEPREVIEWS,
"Toggle Node Previews\tShift+P"
)
# Popup the menu. If an item is selected then its handler
# will be called before PopupMenu returns.
self.PopupMenu(contextmenu)
contextmenu.Destroy()
def OnDeleteNodes(self, event):
""" Event that deletes the selected nodes. """
self.DeleteNodes()
self._parent.Render()
def OnDeleteNode(self, event):
""" Event that deletes a single selected node. """
if self._activeNode != None \
and self._activeNode.IsCompositeOutput() != True:
self._activeNode.Delete()
self._activeNode = None
# Update the properties panel so that the deleted
# nodes' properties are not still shown!
self.NodePropertiesPanel.UpdatePanelContents(self.GetActiveNode())
self.RefreshGraph()
self._parent.Render()
def OnEnableDisableNode(self, event):
""" Event that toggles a node's disabled/enabled state. """
if self._activeNode.IsDisabled() == True:
self._activeNode.SetDisabled(False)
else:
self._activeNode.SetDisabled(True)
self._activeNode.Draw(self._pdc)
self.RefreshGraph()
def OnSelectAllNodes(self, event):
""" Event that selects all the nodes in the Node Graph. """
for node_id in self._nodes:
node = self._nodes[node_id]
if node.IsActive() == True:
node.SetActive(False)
node.SetSelected(True)
node.Draw(self._pdc)
self._selectedNodes.append(node)
self.RefreshGraph()
def OnDeselectAllNodes(self, event):
""" Event that deselects all the currently selected nodes. """
for node_id in self._nodes:
node = self._nodes[node_id]
node.SetSelected(False)
node.Draw(self._pdc)
self._selectedNodes = []
self.RefreshGraph()
def OnDuplicateNode(self, event):
""" Event that duplicates the currently selected node. """
self.DuplicateNode(self._activeNode)
def OnToggleNodePreviews(self, event):
""" Event that toggles the thumbnail preview of all the nodes. """
self.ToggleNodePreviews()
def OnLeftDown(self, event):
pnt = event.GetPosition()
winpnt = self.ConvertCoords(pnt)
self._srcNode = self.NodeHitTest(winpnt)
# Handle adding a node from the node registry
# if LEFT mousebtn and the CTRL key are down.
# TODO: UNUSED AT THE MOMENT!
selected_item = None#self.GetParent().GetNodeRegistry().GetSelectedItem()
if wx.GetKeyState(wx.WXK_CONTROL) == True and selected_item != None:
self.AddNode(selected_item)
else:
# The node has been clicked
if self._srcNode != None:
self._HandleNodeSelection()
# Handle plugs and wires
self._srcPlug = self._srcNode.HitTest(winpnt.x, winpnt.y)
if self._srcPlug != None:
# Handle disconnecting and connecting plugs
if self._srcPlug.GetWires() == []:
# Do not allow connections from anything except
# the output socket
if self._srcPlug.IsOutputType() == True:
pnt1 = self._srcNode.GetRect().GetPosition() \
+ self._srcPlug.GetPosition()
self._tmpWire = Wire(
self,
pnt1,
pnt,
None,
None,
self._srcPlug.GetType(),
curvature=self.Theme["node_wire_curving"]
)
else:
# Do not allow disconnections from the output socket
if self._srcPlug.IsOutputType() != True:
wires = self._srcPlug.GetWires()
dst = wires[0].dstPlug
self._srcPlug = wires[0].srcPlug
dst.Disconnect(
self._srcPlug,
render=self.UserPrefs.GetRendererAutoRender()
)
# Create the temp wire again
pnt = event.GetPosition()
winpnt = self.ConvertCoords(pnt)
pnt1 = self._srcPlug.GetNode().GetRect().GetPosition() \
+ self._srcPlug.GetPosition()
# Draw the temp wire with the new values
self._tmpWire = Wire(
self,
pnt1,
pnt,
None,
None,
self._srcPlug.GetType(),
curvature=self.Theme["node_wire_curving"]
)
# Important: we re-assign the source node variable
self._srcNode = self._srcPlug.GetNode()
else:
# Start the box select bbox
self._bboxStart = winpnt
self._lastPnt = pnt
# Refresh the nodegraph
self.RefreshGraph()
def OnMotion(self, event):
pnt = event.GetPosition()
winpnt = self.ConvertCoords(pnt)
# If the MMB is down, calculate the scrolling of the graph
if event.MiddleIsDown() == True:
self.ScrollNodeGraph(
winpnt[0] - self._middlePnt[0],
winpnt[1] - self._middlePnt[1]
)
# Draw box selection bbox
if event.LeftIsDown() == True \
and self._srcNode == None and self._bboxStart != None:
self._bboxRect = wx.Rect(
topLeft=self._bboxStart,
bottomRight=winpnt
)
self._pdc.RemoveId(ID_SELECTION_BBOX)
self._pdc.SetId(ID_SELECTION_BBOX)
self._pdc.SetPen(
wx.Pen(wx.Colour('#C2C2C2'), 2.5, wx.PENSTYLE_SHORT_DASH)
)
self._pdc.SetBrush(
wx.Brush(wx.Colour(100, 100, 100, 56), wx.SOLID)
)
self._pdc.DrawRectangle(self._bboxRect)
# This is needed here because the
# box select must update in realtime.
self.RefreshGraph()
if not event.LeftIsDown() or self._srcNode == None:
return
# Move the node
if self._srcNode.IsDisabled() != True:
if self._srcPlug == None:
dpnt = pnt - self._lastPnt
self._pdc.TranslateId(self._srcNode.GetId(), dpnt[0], dpnt[1])
rect = self._pdc.GetIdBounds(self._srcNode.GetId())
self._lastPnt = pnt
self._srcNode.SetRect(rect)
# Redraw the wires
if self._srcNode.GetPlugs() != []:
for plug in self._srcNode.GetPlugs():
for wire in plug.GetWires():
pnt1 = wire.srcNode.GetRect().GetPosition() \
+ wire.srcPlug.GetPosition()
pnt2 = wire.dstNode.GetRect().GetPosition() \
+ wire.dstPlug.GetPosition()
self.DrawNodeWire(self._pdc, wire, pnt1, pnt2)
elif self._tmpWire != None:
# Set the wire to be active when it is being edited.
self._tmpWire.SetActive(True)
self.DrawNodeWire(self._pdc, self._tmpWire, pnt2=winpnt)
# Refresh the nodegraph
self.RefreshGraph()
def OnLeftUp(self, event):
# Attempt to make a connection
if self._srcNode != None:
pnt = event.GetPosition()
winpnt = self.ConvertCoords(pnt)
dstnode = self.NodeHitTest(winpnt)
if dstnode != None:
rect = self._pdc.GetIdBounds(self._srcNode.GetId())
dstplug = dstnode.HitTest(
winpnt.x, winpnt.y,
thumb_btn_active=True
)
# Make sure not to allow the same datatype or
# 'plug type' of sockets to be connected!
if dstplug != None \
and self._srcPlug.GetType() != dstplug.GetType() \
and self._srcNode.GetId() != dstnode.GetId() \
and self._srcPlug.GetDataType() == dstplug.GetDataType():
# Only allow a single node to be
# connected to any one socket.
if len(dstplug.GetWires()) < 1:
self._srcPlug.Connect(
dstplug,
render=self.UserPrefs.GetRendererAutoRender()
)
# If there is already a connection,
# but a wire is "dropped" into the plug
# disconnect the last connection and
# connect the current wire.
else:
wires = dstplug.GetWires()
dst = wires[0].dstPlug
src = wires[0].srcPlug
dst.Disconnect(src, render=False)
self._srcPlug.Connect(
dstplug,
render=self.UserPrefs.GetRendererAutoRender()
)
# We can erase the temp wire.
if self._tmpWire != None:
rect = self._pdc.GetIdBounds(self._tmpWire.GetId())
self._pdc.RemoveId(self._tmpWire.GetId())
# Clear selection bbox and set nodes as selected
if self._bboxRect != None:
self._pdc.RemoveId(ID_SELECTION_BBOX)
self._selectedNodes = self.BoxSelectHitTest(self._bboxRect)
for node in self._selectedNodes:
if node.IsSelected() != True and node.IsActive() != True:
node.SetSelected(True)
node.Draw(self._pdc)
# Reset all values
self._srcNode = None
self._srcPlug = None
self._tmpWire = None
self._bboxRect = None
# Update the properties panel
self.NodePropertiesPanel.UpdatePanelContents(self.GetActiveNode())
# Refresh the nodegraph
self.RefreshGraph()
def _HandleNodeSelection(self):
# Set the active node
if self._activeNode == None:
self._activeNode = self._srcNode
self._activeNode.SetActive(True)
self._activeNode.Draw(self._pdc)
else:
# We check to make sure this is not just the same
# node clicked again, then we switch the active states.
if self._srcNode.GetId() != self._activeNode.GetId():
self._activeNode.SetActive(False)
self._activeNode.Draw(self._pdc)
self._activeNode = self._srcNode
self._activeNode.SetActive(True)
self._activeNode.Draw(self._pdc)
# When a node is active, all the selected nodes
# need to be set to the unselected state.
if self.GetSelectedNodes() != []:
for node in self.GetSelectedNodes():
node.SetSelected(False)
node.Draw(self._pdc)
def OnMiddleDown(self, event):
""" Event that updates the cursor. """
winpnt = self.ConvertCoords(event.GetPosition())
self._middlePnt = winpnt
# Update mouse cursor
self.SetCursor(wx.Cursor(wx.CURSOR_SIZING))
def OnMiddleUp(self, event):
""" Event that resets the cursor. """
# Reset mouse cursor
self.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
@property
def NodePropertiesPanel(self):
return self._parent.GetNodePropertyPanel()
@property
def Theme(self):
return self._parent.Theme
@property
def UserPrefs(self):
return self._parent.GetUserPrefManager()
def GetParent(self):
return self._parent
def GetNodes(self):
""" Returns a list of all the nodes in the current
graph. Used by the render engine to access the nodes. """
return self._nodes
def GetSelectedNodes(self):
return self._selectedNodes
def SetSelectedNodes(self, selectednodes):
self._selectedNodes = selectednodes
def GetActiveNode(self):
return self._activeNode
def SetActiveNode(self, activenode):
self._activeNode = activenode
def GetNodeRegistry(self):
return self._parent.GetNodeRegistry()
def GetPDC(self):
return self._pdc
@staticmethod
def GetNodePlug(node, plug):
return node.GetPlug(plug)
@staticmethod
def DrawNodeWire(dc, wire, pnt1=None, pnt2=None):
if pnt1 != None:
wire.SetPoint1(pnt1)
if pnt2 != None:
wire.SetPoint2(pnt2)
wire.Draw(dc)
def ScrollNodeGraph(self, pos_x, pos_y):
""" Scrolls the scrollbars to the specified position. """
scrollpos_x = self.GetScrollPos(wx.HORIZONTAL)
scrollpos_y = self.GetScrollPos(wx.VERTICAL)
self.Scroll(scrollpos_x-pos_x,
scrollpos_y-pos_y
)
self.RefreshGraph()
def NodeHitTest(self, pnt):
idxs = self._pdc.FindObjects(pnt[0], pnt[1], 5)
hits = [
idx
for idx in idxs
if idx in self._nodes
]
if hits != []:
return self._nodes[hits[0]]
else:
# Make sure we deselect everything
for node in self._selectedNodes:
node.SetSelected(False)
node.Draw(self._pdc)
self._selectedNodes = []
if self._activeNode != None:
self._activeNode.SetActive(False)
self._activeNode.Draw(self._pdc)
self._activeNode = None
return None
def BoxSelectHitTest(self, bboxrect):
nodehits = []
for node in self._nodes.values():
if bboxrect.Intersects(node.GetRect()) == True:
nodehits.append(node)
if nodehits != []:
return nodehits
else:
# Make sure we deselect everything
for node in self._selectedNodes:
node.SetSelected(False)
node.Draw(self._pdc)
self._selectedNodes = []
return []
def UpdateAllNodes(self):
for nodeId in self.GetNodes():
self._nodes[nodeId].Draw(self.GetPDC(), False)
self.RefreshGraph()
def AddNode(self, name="", _id=wx.ID_ANY, pos=wx.Point(0, 0), where="DEFAULT"):
""" Adds a node of the given name to the Node Graph.
:param name: the node IDName string to add to the Node Graph. If this is an
empty string (default), it will default to the core Input Image node.
:param _id: id of the node. Creates a new id if not specified
:param pos: ``wx.Point`` position to add the node to the Node Graph
:param where: flag specifying different positioning for adding the node.
This value can be a string of either:
DEFAULT (default): position the node based on the ``pos`` param
CURSOR: position the node based on the current cursor position
"""
if where == "CURSOR":
pos = self.ConvertCoords(
self.ScreenToClient(wx.GetMousePosition())
)
# If the name param is an empty string, default to
# the core Input Image node.
if name == "":
name = "gimelstudiocorenode_image" # Yes, this is hard-coded...
node = self.GetNodeRegistry().CreateNode(self, name, pos, _id)
node_id = node.GetId()
# Default to toggle the Input node thumb open
if node.GetCategory() in ["INPUT"]:
node.SetThumbnailPreviewOpen(redraw=False)
node.Draw(self._pdc, False)
self._pdc.SetIdBounds(node_id, node.GetRect())
self._nodes[node_id] = node
self.RefreshGraph()
return node
def DeleteNodes(self):
""" Delete the currently selected nodes. This will refuse
to delete the Output Composite node though, for obvious reasons.
"""
for node in self._selectedNodes:
if node.IsCompositeOutput() != True:
node.Delete()
else:
# In the case that this is an output node, we
# want to deselect it, not delete it. :)
node.SetSelected(False)
node.Draw(self._pdc)
self._selectedNodes = []
if self._activeNode != None \
and self._activeNode.IsCompositeOutput() != True:
self._activeNode.Delete()
self._activeNode = None
# Update the properties panel so that the deleted
# nodes' properties are not still shown!
self.NodePropertiesPanel.UpdatePanelContents(self.GetActiveNode())
self.RefreshGraph()
def ResetToDefault(self):
""" Reset the Node Graph back to default. """
self._nodes = {}
self._activeNode = None
self._selectedNodes = []
self.GetPDC().RemoveAll()
self.RefreshGraph()
def ToggleNodePreviews(self):
""" Toggle node thumbnail previews. """
toggle = self.CalcNodePreviewToggle()
for node_id in self._nodes:
node = self._nodes[node_id]
if toggle == False:
node.SetThumbnailPreviewClosed()
else:
node.SetThumbnailPreviewOpen()
node.Draw(self._pdc)
self.RefreshGraph()
def CalcNodePreviewToggle(self):
""" Calculate whether all the node previews should be
opened or closed based on the number of previews toggled already.
:returns boolean: whether all node previews should be opened or not
"""
full_count = len(self._nodes)
toggled_count = 0
for node_id in self._nodes:
node = self._nodes[node_id]
if node.GetDrawThumb() == True:
toggled_count += 1
if toggled_count < (full_count - toggled_count):
return True
else:
return False
def DuplicateNode(self, node):
""" Duplicates the given ``Node`` object with its properties.
:param node: the ``Node`` object to duplicate
:returns: the duplicate ``Node`` object
"""
duplicate_node = self.AddNode(
node.GetIDName(),
_id=wx.ID_ANY,
where="CURSOR"
)
# Assign the same properties to the duplicate node object
for prop in node.GetEvaluationData()["properties"]:
duplicate_node.EditProperties(prop["name"], prop["value"])
self.RefreshGraph()
return duplicate_node
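# Illustrative usage sketch (not part of the original file): from the parent
# frame that owns this NodeGraph, nodes are added either at an explicit point
# or at the mouse cursor, e.g.:
#
#     node = node_graph.AddNode("gimelstudiocorenode_image",
#                               pos=wx.Point(100, 100))
#     node = node_graph.AddNode("gimelstudiocorenode_image", where="CURSOR")
#
# Both calls return the created node, which can then be duplicated with
# DuplicateNode(node) or removed via DeleteNodes() once selected.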
| [
"[email protected]"
]
| |
9c53ee59254457ca1ebe3696b9eec8d479047f35 | f2a5680231e205dc49a083578d9bd90e4603036c | /Grokking-Coding-Interview-Patterns/1. Sliding Window/smallestWindowContainingSubString.py | 28ac6fe6c76af028ec7572eff2a802f078411d8d | []
| no_license | flogothetis/Technical-Coding-Interviews-Algorithms-LeetCode | d592451f7d297fd52395e33dc67686e9990a663c | 7c8473fce4b5b5affbfde5ed8c39fdb89cbc77d4 | refs/heads/master | 2023-01-13T15:56:07.706164 | 2020-11-18T18:54:52 | 2020-11-18T18:54:52 | 281,101,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py |
def smallestWindowContainingSubString (array, pattern):
# Put pattern in dictionary
dictionary = {}
for ch in pattern:
if ch not in dictionary:
dictionary[ch] = 0
dictionary[ch]+=1
windowStart = 0
globalMinWindow = len(array) + 1
match_ch = 0
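    # grow the window by advancing windowEnd; once every distinct pattern character
    # is matched, shrink from windowStart while recording the smallest window length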
for windowEnd in range (len(array)):
if (array[windowEnd] in dictionary ):
dictionary[array[windowEnd]]-=1
if(dictionary[array[windowEnd]] == 0):
match_ch+=1
        # compare against the number of distinct pattern characters (not the raw
        # pattern length) so patterns with repeated characters are handled too
        while(match_ch == len(dictionary)):
globalMinWindow = min (globalMinWindow, (windowEnd- windowStart +1))
if(array[windowStart] in dictionary):
if(dictionary[array[windowStart]] == 0):
match_ch-=1
dictionary[array[windowStart]]+=1
windowStart+=1
if (globalMinWindow <= len(array)):
return globalMinWindow
else:
return 0
def main():
print(smallestWindowContainingSubString("aabdec", "abc"))
print(smallestWindowContainingSubString("abdbca", "abc"))
print(smallestWindowContainingSubString("adcad", "abc"))
main()
| [
"[email protected]"
]
| |
77407c3a441761998d2364c7ac96a4444a400a40 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/costmanagement/azext_costmanagement/vendored_sdks/costmanagement/aio/operations_async/_dimension_operations_async.py | 9e7580d0894db821daaee8d73e42d085f1bd6d1d | [
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 12,524 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DimensionOperations:
"""DimensionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.costmanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
scope: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
skiptoken: Optional[str] = None,
top: Optional[int] = None,
**kwargs
) -> "models.DimensionsListResult":
"""Lists the dimensions by the defined scope.
:param scope: The scope associated with dimension operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'/providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group
scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:param filter: May be used to filter dimensions by properties/category, properties/usageStart,
properties/usageEnd. Supported operators are 'eq','lt', 'gt', 'le', 'ge'.
:type filter: str
:param expand: May be used to expand the properties/data within a dimension category. By
default, data is not included when listing dimensions.
:type expand: str
:param skiptoken: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls.
:type skiptoken: str
:param top: May be used to limit the number of results to the most recent N dimension data.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DimensionsListResult or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.DimensionsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DimensionsListResult"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
api_version = "2019-11-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
else:
url = next_link
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=1000, minimum=1)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DimensionsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/dimensions'}
def by_external_cloud_provider_type(
self,
external_cloud_provider_type: Union[str, "models.ExternalCloudProviderType"],
external_cloud_provider_id: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
skiptoken: Optional[str] = None,
top: Optional[int] = None,
**kwargs
) -> "models.DimensionsListResult":
"""Lists the dimensions by the external cloud provider type.
:param external_cloud_provider_type: The external cloud provider type associated with
dimension/query operations. This includes 'externalSubscriptions' for linked account and
'externalBillingAccounts' for consolidated account.
:type external_cloud_provider_type: str or ~azure.mgmt.costmanagement.models.ExternalCloudProviderType
:param external_cloud_provider_id: This can be '{externalSubscriptionId}' for linked account or
'{externalBillingAccountId}' for consolidated account used with dimension/query operations.
:type external_cloud_provider_id: str
:param filter: May be used to filter dimensions by properties/category, properties/usageStart,
properties/usageEnd. Supported operators are 'eq','lt', 'gt', 'le', 'ge'.
:type filter: str
:param expand: May be used to expand the properties/data within a dimension category. By
default, data is not included when listing dimensions.
:type expand: str
:param skiptoken: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls.
:type skiptoken: str
:param top: May be used to limit the number of results to the most recent N dimension data.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DimensionsListResult or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.DimensionsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DimensionsListResult"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
api_version = "2019-11-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.by_external_cloud_provider_type.metadata['url']
path_format_arguments = {
'externalCloudProviderType': self._serialize.url("external_cloud_provider_type", external_cloud_provider_type, 'str'),
'externalCloudProviderId': self._serialize.url("external_cloud_provider_id", external_cloud_provider_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
else:
url = next_link
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=1000, minimum=1)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DimensionsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
by_external_cloud_provider_type.metadata = {'url': '/providers/Microsoft.CostManagement/{externalCloudProviderType}/{externalCloudProviderId}/dimensions'}
| [
"[email protected]"
]
| |
228481d618f5cf5de30e9d3ca069a890fdb834ce | 56b1569a62c6a155ce9cf4b8059bd085848dd859 | /Python/camera_calibration/undistort_and_transform.py | a9261ec389a83b3a764bd25733b6ee15bbf2c4c2 | []
| no_license | Marius-Juston/Advanced-Autonomous-Vehicule | 1485ccc1a3dafbb875f845b2ba00cb05c6d6ca40 | 7f188428aafe0c0dfff75dd8567199c7067be17d | refs/heads/master | 2022-11-15T04:52:19.713449 | 2020-07-10T22:09:04 | 2020-07-10T22:09:04 | 266,609,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | import pickle
import cv2
import matplotlib.pyplot as plt
import numpy as np
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load(open("wide_dist_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# Read in an image
img = cv2.imread('test_image2.png')
nx = 8 # the number of inside corners in x
ny = 6 # the number of inside corners in y
# MODIFY THIS FUNCTION TO GENERATE OUTPUT
# THAT LOOKS LIKE THE IMAGE ABOVE
def corners_unwarp(img, nx, ny, mtx, dist):
# Pass in your image into this function
# Write code to do the following steps
# 1) Undistort using mtx and dist
# 2) Convert to grayscale
# 3) Find the chessboard corners
# 4) If corners found:
# a) draw corners
# b) define 4 source points src = np.float32([[,],[,],[,],[,]])
# Note: you could pick any four of the detected corners
# as long as those four corners define a rectangle
# One especially smart way to do this would be to use four well-chosen
# corners that were automatically detected during the undistortion steps
# We recommend using the automatic detection of corners in your code
# c) define 4 destination points dst = np.float32([[,],[,],[,],[,]])
# d) use cv2.getPerspectiveTransform() to get M, the transform matrix
# e) use cv2.warpPerspective() to warp your image to a top-down view
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
gray = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)
    # use the chessboard dimensions passed in rather than hard-coding them
    grid_size = (nx, ny)
    ret, corners = cv2.findChessboardCorners(gray, grid_size, None)
if ret:
cv2.drawChessboardCorners(undistorted, (nx, ny), corners, ret)
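        # corners are returned row by row (row-major), so the four outer corners sit
        # at flat indices 0, nx-1, nx*(ny-1) and nx*ny-1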
up_left = 0
up_right = grid_size[0] - 1
down_left = grid_size[0] * (grid_size[1] - 1)
down_right = down_left + grid_size[0] - 1
source_points = np.array([corners[up_left][0], corners[up_right][0], corners[down_left][0], corners[down_right][0]],
dtype=np.float32)
offset = 100
h, w = gray.shape
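        # destination points: an axis-aligned rectangle inset `offset` pixels from the
        # image borders, which gives the warped top-down view a margin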
dist_points = np.array([[offset, offset], [w - offset, offset], [offset, h - offset], [w - offset, h - offset]], dtype=np.float32)
M = cv2.getPerspectiveTransform(source_points, dist_points)
perspective = cv2.warpPerspective(undistorted, M, (w, h), flags=cv2.INTER_LINEAR)
return perspective, M
top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(top_down)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.show()
| [
"[email protected]"
]
| |
fa15c3be548b8dc694d4faf4e585ba491143fd5c | cf7b3522b6fa8765b3f12dec06fd2868712d4e9a | /cristianoronaldoyopmailcom_282/wsgi.py | a1d3ebed57f67b9882524a591c63040c549c940e | []
| no_license | payush/cristianoronaldoyopmailcom-282 | 367d381b440343a8c75b4f8ae557fbf4c34b419e | 06e7f0fa63e863f11181bbde515574eda9844891 | refs/heads/master | 2020-03-23T13:10:53.127490 | 2018-07-19T16:16:59 | 2018-07-19T16:16:59 | 141,604,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | """
WSGI config for cristianoronaldoyopmailcom_282 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cristianoronaldoyopmailcom_282.settings")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
7bdb7f5fdf1b70740ee3bf3595aca652f7137168 | 5ce77901781240f5c42539a471b27bbc8cbe444f | /Analysis/plot_in_degree.py | 13b954779fd0e2a1556bc72dddbb124fa3de5a0a | []
| no_license | jcate6/Bitcoin-fdac17 | a70f5d37adf8fa887602e25b83fcfe8fa970b4a7 | 0098a5f3a8b22a1dad69f1a38722836dfcc7108f | refs/heads/master | 2021-08-24T12:19:24.309279 | 2017-12-09T20:03:50 | 2017-12-09T20:03:50 | 107,714,261 | 0 | 4 | null | 2017-12-09T18:12:54 | 2017-10-20T18:44:21 | TeX | UTF-8 | Python | false | false | 520 | py | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv("In_degree_distros\\2017-11-15.csv")
data = data.as_matrix()
x = data[:,0]
y = data[:,1]
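# keep only in-degrees up to 100; this assumes the CSV rows are sorted by in-degree,
# so the matching frequencies are simply the first len(x) rows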
x = x[x<=100]
y = y[:x.shape[0]]
fig = plt.figure()
ax = fig.add_subplot(1,2,1)
ax.plot(x, y)
ax.set_xlabel("In-degree")
ax.set_ylabel("Frequency")
ax.set_title("In-Degree over Time")
ax.set_xscale("log")
ax.set_yscale("log")
fig.savefig("Plots\\2017-11-15_in_degree.png", bbox_inches='tight')
#plt.show()
| [
"[email protected]"
]
| |
f72e767999eef2f1d01d990eed01d56b07395a50 | 2f2d4571066a443121a1930b40b4045b7c284306 | /SimpleEventCounter/SimpleEventCounter/python/ConfFile_reco_cfg.py | fbdfe0916b529df061fbc439bc52a344f588d344 | []
| no_license | rvenditti/Tau3MuSearch | fb99397dfabb2d307535d80b374b86295df7de12 | 36f699e265164829c9843c787f6c9a9dfbbff696 | refs/heads/master | 2021-07-11T17:02:37.833567 | 2020-07-15T13:23:14 | 2020-07-15T13:23:14 | 177,627,204 | 1 | 4 | null | 2020-07-15T13:23:15 | 2019-03-25T16:48:07 | C | UTF-8 | Python | false | false | 1,012 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration.StandardSequences.Services_cff')
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:/lustre/cms/store/user/rosma/SingleMuon/crab_SingleMuonRun2016B_MyZMuSkim_CMSSW_8_0_10_v4/170108_161635/0000/skims_SKIM_854.root'
# 'file:./Run2016B_SingleMuon_RAWRECO_ZMuPromptReco.root'
)
)
process.recoMuAna = cms.EDAnalyzer('RecoMuonAnalyzer',
muonsInputTag = cms.InputTag("muons"),
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("histoSingleMu_reco.root")
)
process.p = cms.Path(process.recoMuAna)
| [
"[email protected]"
]
| |
c4804bc3d75a90ddebcf2c795fe708d59d31f064 | 3a2ac319a9a09bdc05f5e4deff56419eb6c661de | /som.py | 5a5c889ba1702d5a931e33520ea6fb389c585716 | [
"MIT"
]
| permissive | mkruuse/segmenting-turbulent-simulations-with-ensemble-learning | 817229d455a18ea229a32f89ec8564d28f8c9751 | 8708e8052974f1f49d91e5ad769e6539865d5906 | refs/heads/main | 2023-07-21T14:14:36.466172 | 2021-09-02T13:12:07 | 2021-09-02T13:12:07 | 377,840,218 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,327 | py | ## SOM code for reproducing Bussov & Nattila 2021 image segmentation results
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.tri as tri
import matplotlib.pyplot as plt
#import tensorflow as tf
from matplotlib import colors
import popsom.popsom as popsom
import pandas as pd
import colorsys
import h5py as h5
import sys, os
from utils_plot2d import read_var
from utils_plot2d import Conf
from utils_plot2d import imshow
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from sklearn import metrics
import argparse
parser = argparse.ArgumentParser(description='popsom code')
parser.add_argument('--xdim', type=int, dest='xdim', default=10, help='Map x size')
parser.add_argument('--ydim', type=int, dest='ydim', default=10, help='Map y size')
parser.add_argument('--alpha', type=float, dest='alpha', default=0.5, help='Learning parameter')
parser.add_argument('--train', type=int, dest='train', default=10000, help='Number of training steps')
args = parser.parse_args()
def neighbors(arr,x,y,n=3):
''' Given a 2D-array, returns an nxn array whose "center" element is arr[x,y]'''
arr=np.roll(np.roll(arr,shift=-x+1,axis=0),shift=-y+1,axis=1)
return arr[:n,:n]
if __name__ == "__main__":
# set-up plotting
#plt.fig = plt.figure(1, figsize=(4,3.5), dpi=200)
#fig = plt.figure(1, figsize=(6,6), dpi=300)
plt.rc('font', family='sans-serif')
#plt.rc('text', usetex=True)
plt.rc('xtick', labelsize=5)
plt.rc('ytick', labelsize=5)
plt.rc('axes', labelsize=5)
scaler = StandardScaler()
conf = Conf()
#build feature matrix
feature_list = [
'rho',
'bx',
'by',
'bz',
'ex',
'ey',
'ez',
'jx',
'jy',
'jz',
]
#--------------------------------------------------
def read_h5_data_size(fdir, lap):
conf.fields_file = fdir + 'raw_data_'+str(lap)+'.h5'
f5F = h5.File(conf.fields_file,'r')
#read one and check size
val0 = read_var(f5F, feature_list[0])
nx,ny = np.shape(val0)
return nx,ny
xmin = 0.0
ymin = 0.0
xmax = 1.0
ymax = 1.0
fdir = 'sample-raw-data/' # data directory; assume main dir in this sample script
laps = [6600] # all the data laps to process
lap = laps[0] # data file number
#subimage size
splitf = 1
#total size of the complete image in subimages
splitfx = splitf*len(laps)
splitfy = splitf
nx,ny = read_h5_data_size(fdir, laps[0])
nxs = nx//splitf
nys = ny//splitf
if True:
f5 = h5.File('data_features_{}.h5'.format(lap), 'r')
x = f5['features'][()]
y = f5['target'][()]
feature_list = f5['names'][()]
feature_list = [n.decode('utf-8') for n in feature_list]
f5.close()
print(feature_list)
print("shape after x:", np.shape(x))
#--------------------------------------------------
# analyze
#1. standardize:
scaler = StandardScaler()
scaler = MinMaxScaler()
scaler.fit(x)
x = scaler.transform(x)
##### Using the SOM:
# POPSOM SOM:
attr=pd.DataFrame(x)
attr.columns=feature_list
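    # popsom expects a pandas DataFrame with named feature columns plus one string
    # label per observation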
#parser.parse_args
print("setting dimensions", parser.parse_args())
print('constructing SOM...')
m=popsom.map(args.xdim, args.ydim, args.alpha, args.train)
labels = [str(xxx) for xxx in range(len(x))]
m.fit(attr,labels)
m.starburst()
m.significance()
#Data matrix with neuron positions:
data_matrix=m.projection()
data_Xneuron=data_matrix['x']
data_Yneuron=data_matrix['y']
print("Printing Xneuron info")
print(data_Xneuron)
print("Printing Xneuron info position 5")
print(data_Xneuron[4])
print("Printing Yneuron info")
print(data_Yneuron)
#Neuron matrix with centroids:
umat = m.compute_umat(smoothing=2)
centrs = m.compute_combined_clusters(umat, False, 0.15) #0.15
centr_x = centrs['centroid_x']
centr_y = centrs['centroid_y']
#create list of centroid _locations
nx, ny = np.shape(centr_x)
centr_locs = []
for i in range(nx):
for j in range(ny):
cx = centr_x[i,j]
cy = centr_y[i,j]
centr_locs.append((cx,cy))
unique_ids = list(set(centr_locs))
print(unique_ids)
n_clusters = len(unique_ids)
print("Number of clusters")
print(n_clusters)
mapping = {}
for I, key in enumerate(unique_ids):
print(key, I)
mapping[key] = I
clusters = np.zeros((nx,ny))
for i in range(nx):
for j in range(ny):
key = (centr_x[i,j], centr_y[i,j])
I = mapping[key]
clusters[i,j] = I
print(centr_x)
print(centr_y)
print("clusters")
print(clusters)
print(np.shape(clusters))
#TRANSFER RESULT BACK INTO ORIGINAL DATA PLOT
if True:
print("plotting SOM cluster images")
fig = plt.figure(1, figsize=(6,6), dpi=300) #need to hack the number of columns according to image count
fig.clf()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
gs = plt.GridSpec(splitfx, splitfy)
gs.update(hspace = 0.05)
gs.update(wspace = 0.05)
#Create N number of colors:
def get_N_HexCol(N=n_clusters):
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
hex_out = []
for rgb in HSV_tuples:
rgb = map(lambda x: int(x * 255), colorsys.hsv_to_rgb(*rgb))
hex_out.append('#%02x%02x%02x' % tuple(rgb))
return hex_out
cols=get_N_HexCol(N=n_clusters)
print("The count of colors, should be same as number of clusters")
print(len(cols))
print("Colors", cols)
cmap = colors.ListedColormap(cols) #should here be len(cols)?
bounds=np.arange(cmap.N)
norm = colors.BoundaryNorm(bounds, cmap.N)
data_back = np.zeros((nxs,nys,splitfx*splitfy))
xinds = np.zeros(len(data_Xneuron))
print("shape of xinds:", np.shape(xinds))
j = 0
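        # walk the image pixels in the same order the feature rows were built and
        # label each pixel with the cluster of its best-matching SOM neuron
        # (data_Xneuron/data_Yneuron hold that neuron's grid coordinates)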
for ix in range(nxs):
for iy in range(nys):
for isubim in range(splitfx*splitfy):
data_back[ix,iy,isubim] = clusters[data_Xneuron[j], data_Yneuron[j]]
xinds[j] = clusters[data_Xneuron[j], data_Yneuron[j]]
j += 1
f5 = h5.File('data_som_clusters_{}.h5'.format(lap), 'w')
dsetx = f5.create_dataset("databack", data=data_back)
f5.close()
print("Done writing the cluster ID file")
for spf in range(splitfx*splitfy):
ax = fig.add_subplot(splitfx,splitfy, spf+1, xticks=[], yticks=[])
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.clear()
ax.minorticks_on()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
extent = [ xmin, xmax, ymin, ymax ]
mgrid = data_back[:,:,spf].T
im = ax.imshow(mgrid, extent=extent)
fig.savefig('som_data_transform.png')
print("done plotting")
# #PLOTTING:
#visualize clusters
x = np.array(x)
data_back = np.array(data_back)
if True:
print("visualizing SOM data")
#fig2 = plt.figure(2, figsize=(splitfx,splitfy), dpi=400)
fig2 = plt.figure(2, figsize=(8,8), dpi=400)
fig2.clf()
ic = 0
nfea = len(feature_list)
for jcomp in range(nfea):
for icomp in range(nfea):
ic += 1
print("i {} j {}".format(icomp, jcomp))
#skip covariance with itself
if icomp == jcomp:
continue
#skip upper triangle
if icomp > jcomp:
continue
ax = fig2.add_subplot(nfea, nfea, ic)
for ki in range(n_clusters):
                    # take the flat index array so the slices below are 1-D and the
                    # ::500 subsampling thins the points as intended
                    indxs = np.where(xinds == ki)[0]
#print("len of found pixels:", len(indxs), indxs)
xx = x[indxs, icomp]
yy = x[indxs, jcomp]
xxt = xx[::500]
yyt = yy[::500]
ax.scatter(
xxt,
yyt,
c=cols[ki],
marker='.',
s=0.1,
rasterized=True,
)
if False:
#visualize most dissipative points
xx = x[:,icomp]
yy = x[:,jcomp]
zz = y[:]
xxd = xx[np.where(np.abs(zz) > 0.020)]
yyd = yy[np.where(np.abs(zz) > 0.020)]
zzd = zz[np.where(np.abs(zz) > 0.020)]
xxd = xxd[::100]
yyd = yyd[::100]
zzd = zzd[::100]
print("found {} points above threshold".format(len(xxd)))
ax.scatter(xxd,yyd,c=zzd,
cmap='inferno',
vmin=-0.015,
vmax= 0.015,
marker='.',
s=0.05,
alpha=0.1,
)
if jcomp == nfea-1:
ax.set_xlabel('{}'.format(feature_list[icomp]))
else:
ax.set_xticks([])
if icomp == 0:
ax.set_ylabel('{}'.format(feature_list[jcomp]))
else:
ax.set_yticks([])
fig2.savefig('som_clusters.pdf')
| [
"[email protected]"
]
| |
62883f450ecf5b25634e8fe8c444a394c084319d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /aYTLW5jmRoXhnnzwj_6.py | 08b1065f1573b361271abc724e18f8f6becac06c | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py |
query = " SELECT Name FROM employees WHERE Salary > 45000"
| [
"[email protected]"
]
| |
1f5525b4232b289bdbea2755bdbdd2b8336c6741 | de74a2af11962af7a8ef3dfb16fa130d35580f3a | /pylib/mailutils.py | b67e832b62cb800308e9238eee823b39470f78de | []
| no_license | vieyahn/winterpy | 4d46fa196bd7517ce4adc785ec803d9fc9aad0a0 | 5a54bcd4dd6d1c6c41b971d6049bcd94e9fb0e70 | refs/heads/master | 2021-01-14T12:57:26.515877 | 2016-08-26T10:38:33 | 2016-08-26T10:39:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | # vim:fileencoding=utf-8
import re
import datetime
import codecs
from email import header
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
addr_re = re.compile(r'(.*?)\s+(<[^>]+>)($|,\s*)')
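# matches "Display Name <address>" elements (optionally followed by a comma) so that
# only the display-name part gets RFC 2047 encoded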
def decode_multiline_header(s):
ret = []
for b, e in header.decode_header(re.sub(r'\n\s+', ' ', s)):
if e:
if e.lower() == 'gb2312':
e = 'gb18030'
b = b.decode(e)
elif isinstance(b, bytes):
b = b.decode('ascii')
ret.append(b)
return ''.join(ret)
def get_datetime(m):
d = m['Date']
# Wed, 18 Jun 2014 04:09:18 +0000
t = datetime.datetime.strptime(d, '%a, %d %b %Y %H:%M:%S %z')
# convert to local time
return datetime.datetime.fromtimestamp(t.timestamp())
def decode_payload(m):
p = m.get_payload()
enc = m['Content-Transfer-Encoding']
ctype = m['Content-Type']
charset = get_charset_from_ctype(ctype) or 'utf-8'
return codecs.decode(p.encode(), enc).decode(charset)
def assemble_mail(subject, to, from_, html=None, text=None):
if html is None and text is None:
raise TypeError('no message given')
if html:
html = MIMEText(html, 'html', 'utf-8')
if text:
text = MIMEText(text, 'plain', 'utf-8')
if html and text:
msg = MIMEMultipart('alternative', _subparts = [text, html])
else:
msg = html or text
msg['Subject'] = encode_header(subject)
msg['From'] = encode_header_address(from_)
if isinstance(to, (list, tuple)):
msg['To'] = ', '.join(encode_header_address(x) for x in to)
else:
msg['To'] = encode_header_address(to)
return msg
def encode_header_address(s):
return addr_re.sub(_addr_submatch, s)
def encode_header(s):
return Header(s, 'utf-8').encode() if not eight_bit_clean(s) else s
def _addr_submatch(m):
return encode_header(m.group(1)) + ' ' + m.group(2) + m.group(3)
def eight_bit_clean(s):
return all(ord(c) < 128 for c in s)
def get_charset_from_ctype(ctype):
pos = ctype.find('charset=')
if pos > 0:
charset = ctype[pos+8:]
if charset.lower() == 'gb2312':
# Windows misleadingly uses gb2312 when it's gbk or gb18030
charset = 'gb18030'
elif charset.lower() == 'windows-31j':
# cp932's IANA name (Windows-31J), extended shift_jis
# https://en.wikipedia.org/wiki/Code_page_932
charset = 'cp932'
return charset
| [
"[email protected]"
]
| |
6941f33fa19a99ce7ea12e17aa5a6c6b7beef08a | 2256297f4925bcecac88d804c6a1be2224d56bcd | /game/misc/__init__.py | 20227fcd7f95eb2245ccab25843b8af7f56455ef | [
"MIT"
]
| permissive | thealphadollar/brkout | 15877cb9bc0aa261b72eccc7e2cda53fe35b153e | 9dc7bc5421ea5f554462aee9f363b22a61567613 | refs/heads/master | 2022-01-11T08:53:01.821940 | 2019-05-05T10:14:28 | 2019-05-05T10:14:28 | 114,479,027 | 14 | 28 | MIT | 2019-05-05T10:14:29 | 2017-12-16T17:40:24 | Python | UTF-8 | Python | false | false | 125 | py | ''' Import all modules in the package '''
from .game_enums import *
from .game_parameters import *
from .collisions import *
| [
"[email protected]"
]
| |
ce745e580227ed4af6b01b2899b374cfe1fa8c09 | d332507e59e0abb0315401e687638f62f9341a74 | /src/openpal/AllHeaders.h | 91b97a63fb778880bbaa13128732c50ee3ac5754 | [
"Apache-2.0"
]
| permissive | txjmb/pydnp3 | 151591634181e63582ac2a9479a6286730d7a48d | fff9835d5ce9a75bd89a585942d3fbd3ad3de923 | refs/heads/master | 2022-04-21T11:09:48.319612 | 2020-04-23T20:06:46 | 2020-04-23T20:06:46 | 257,956,527 | 0 | 0 | Apache-2.0 | 2020-04-22T16:23:23 | 2020-04-22T16:23:22 | null | UTF-8 | Python | false | false | 3,008 | h | /*
* -*- coding: utf-8 -*- {{{
* vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
*
* Copyright 2018, Kisensum.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Neither Kisensum, nor any of its employees, nor any jurisdiction or
* organization that has cooperated in the development of these materials,
* makes any warranty, express or implied, or assumes any legal liability
* or responsibility for the accuracy, completeness, or usefulness or any
* information, apparatus, product, software, or process disclosed, or
* represents that its use would not infringe privately owned rights.
* Reference herein to any specific commercial product, process, or service
* by trade name, trademark, manufacturer, or otherwise does not necessarily
* constitute or imply its endorsement, recommendation, or favoring by Kisensum.
* }}}
*/
#ifndef PYDNP3_OPENPAL_ALLHEADERS_H
#define PYDNP3_OPENPAL_ALLHEADERS_H
#define PYDNP3_OPENPAL
namespace openpal {
class UInt48Type;
}
namespace opendnp3 {
typedef openpal::UInt48Type DNPTime;
}
// ---------- OPENPAL HEADERS ----------
//#include "channel/IPhysicalLayer.h" //missing "ChannelStatistics.h" file
#include "channel/IPhysicalLayerCallbacks.h"
#include "container/Array.h"
#include "container/ArrayView.h"
#include "container/Buffer.h"
#include "container/HasSize.h"
#include "container/Pair.h"
#include "container/RSlice.h"
#include "container/Settable.h"
#include "container/StaticBuffer.h"
#include "container/WSlice.h"
#include "executor/IExecutor.h"
#include "executor/IMonotonicTimeSource.h"
#include "executor/ITimer.h"
#include "executor/IUTCTimeSource.h"
#include "executor/MonotonicTimestamp.h"
#include "executor/TimeDuration.h"
#include "executor/TimerRef.h"
#include "executor/UTCTimestamp.h"
#include "logging/ILogHandler.h"
#include "logging/LogEntry.h"
#include "logging/LogFilters.h"
#include "logging/Logger.h"
#include "logging/LogLevels.h"
#include "logging/StringFormatting.h"
#include "serialization/DoubleFloat.h"
#include "serialization/FloatByteOrder.h"
#include "serialization/Format.h"
#include "serialization/Parse.h"
#include "serialization/Serialization.h"
#include "serialization/SerializationTemplatesLE.h"
#include "serialization/Serializer.h"
#include "serialization/SingleFloat.h"
#include "util/Comparisons.h"
#include "util/Finally.h"
#include "util/Limits.h"
#include "util/SequenceNum.h"
#include "util/ToHex.h"
#include "util/Uncopyable.h"
#include "Configure.h"
#endif
| [
"[email protected]"
]
| |
b34912f55df79ef6fca6df9cc4125e84a58ff2d4 | c459f4dd7b198ec8d8db8379726a5b2650be6636 | /appl/migrations/0031_projectapplication_verification_number.py | 42ff161f342dcf374df751e7b6ac04d39b7aafad | []
| no_license | jittat/admapp | 4c712182cd06e82efab6c2513fb865e5d00feae8 | 38bf299015ae423b4551f6b1206742ee176b8b77 | refs/heads/master | 2023-06-10T03:23:41.174264 | 2023-06-09T19:41:03 | 2023-06-09T19:41:03 | 101,953,724 | 10 | 4 | null | 2023-04-21T22:48:55 | 2017-08-31T03:12:04 | Python | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-11 04:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appl', '0030_auto_20171011_0416'),
]
operations = [
migrations.AddField(
model_name='projectapplication',
name='verification_number',
field=models.CharField(blank=True, max_length=20),
),
]
| [
"[email protected]"
]
| |
ca864943b93a2b6e37b700762d4dd9f484604ac2 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/PostenMapping/Model/Post050407019.py | 4569171ae4b997901f889b207ead5e7a75b37585 | [
"MIT"
]
| permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 2,916 | py | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post050407019(StandaardPost):
def __init__(self):
super().__init__(
nummer='0504.07019',
beschrijving='Fundering van zandcement volgens 5-4.7, dikte 19 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotation='laagRol',
defaultWaarde='fundering',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.07019')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw.type',
dotnotation='type',
defaultWaarde='zandcement',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.07019')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotation='dikte',
defaultWaarde='19',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.07019')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotation='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.07019')])
| [
"[email protected]"
]
| |
840e8e3b7c7e142ac1748f12224d4f410b3f3df9 | 3c56b08398d4478328ecaf5e70599e1b7e23d70b | /ml_infra_template.py | a76a21aebbc87e8ad51a139be4f6605dc76f245c | []
| no_license | sidneyriffic/ml_infra_template | 3e33f4a8ddabd6ac1e07f747fba902e0df27b703 | d2681964165be04f1a30b5a4436755e44724090f | refs/heads/master | 2022-12-27T10:07:55.735571 | 2020-10-07T20:17:28 | 2020-10-07T20:17:28 | 302,149,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | #!/usr/bin/env python3
"""Command line entry point for data/model interactions"""
import importlib
import argparse
import preprocess_ex as ppex
print(ppex)
description = 'Define model and data operations'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-m', '--model', dest='make_model_path',
help='Model build path')
parser.add_argument('-p', '--preprocess', dest='pre_path',
help='Preprocess folder path')
parser.add_argument('-s', '--serialized', dest='serial_model',
help='Use a saved serialized model')
parser.add_argument('-t', '--train', dest='train_path',
help='Train a model')
args = parser.parse_args()
print(args)
pre_path = args.pre_path[1:-1]
preprocess = importlib.import_module(pre_path + '.preprocess')
print(preprocess)
preprocess.preprocess(pre_path + '/')
| [
"[email protected]"
]
| |
9fae75716351829fb089c9d7e5195ade93a0258a | 381fd0a6f1f716f68bb2d5ef6340cee59e770065 | /advent_of_code/2020/day7.py | 048b6df7150076ade05fd62922a0f8c2756193b5 | [
"Unlicense"
]
| permissive | netotz/codecamp | 4ec5ca4f8cf0adcdcbf99cd5533304ddd3d060d2 | 208fd1c929a85397ecdaa0c6cc74b8c6d99d76c7 | refs/heads/master | 2023-02-09T08:23:32.778882 | 2023-01-25T01:15:45 | 2023-01-25T01:15:45 | 253,117,424 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | import re
def parse_input(rawrules):
return {
fl[0]: {} if fl[1] == ''
else {
s[2:]: int(s[0])
for s in fl[1].split(' , ')
}
for line in rawrules.splitlines()
if (fl := [
s.strip()
for s in
re.sub(
r'(no other)*|bag(s*)|[.]', '', line
).split('contain')
])
}
with open('inputs/input7.txt') as file:
input7 = parse_input(file.read())
MYBAG = 'shiny gold'
def get_containers(rules):
containers = set()
def is_container(bag):
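        # a bag qualifies if any of its sub-bags is the shiny gold bag or is already
        # a known container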
subbags = set(b for b in rules[bag])
if containers & subbags or MYBAG in subbags:
containers.add(bag)
return True
for b in subbags:
if is_container(b):
containers.add(b)
return True
for bag in rules:
if bag in containers:
continue
if is_container(bag):
containers.add(bag)
return containers
def count_required(rules):
def count_subbags(bag):
subbags = rules[bag].items()
if not subbags:
return 0
local_count = 0
for b, c in subbags:
accumulated = count_subbags(b)
if accumulated == 0:
local_count += c
else:
local_count += accumulated * c
return local_count + 1
total_bags = 0
for bag, count in rules[MYBAG].items():
total_bags += count_subbags(bag) * count
return total_bags
answer1 = len(get_containers(input7))
answer2 = count_required(input7)
def test():
raw = '''light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.'''
sample = parse_input(raw)
assert len(get_containers(sample)) == 4
assert count_required(sample) == 32
raw = '''shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.'''
sample = parse_input(raw)
assert count_required(sample) == 126
| [
"[email protected]"
]
| |
ff753b9ca5453fbb8d40b0c432ca3478c3c2c751 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DS8200v2-TC-MIB.py | 9508e7b6cfa965606170f2e85cae511187b5a04d | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 2,044 | py | #
# PySNMP MIB module DS8200v2-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DS8200v2-TC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:54:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, Counter32, Bits, Unsigned32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, MibIdentifier, Gauge32, IpAddress, ObjectIdentity, ModuleIdentity, enterprises, TimeTicks, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Counter32", "Bits", "Unsigned32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "MibIdentifier", "Gauge32", "IpAddress", "ObjectIdentity", "ModuleIdentity", "enterprises", "TimeTicks", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
verilink = ModuleIdentity((1, 3, 6, 1, 4, 1, 321))
if mibBuilder.loadTexts: verilink.setLastUpdated('0011150000Z')
if mibBuilder.loadTexts: verilink.setOrganization('Verilink Corporation')
if mibBuilder.loadTexts: verilink.setContactInfo('Bob Ray [email protected] 1-256-774-2380')
if mibBuilder.loadTexts: verilink.setDescription('DS8200v2 TC MIB.')
hbu = MibIdentifier((1, 3, 6, 1, 4, 1, 321, 100))
mibBuilder.exportSymbols("DS8200v2-TC-MIB", hbu=hbu, PYSNMP_MODULE_ID=verilink, verilink=verilink)
| [
"[email protected]"
]
| |
d7fbc3caf38b5d1e8c88ba18cd5ba590516ea044 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_176/ch62_2019_03_29_14_32_02_630978.py | c677efb4ff50461d54b43e6c822ed6442ba27cd1 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def filtra_positivos(n):
positivos=[]
i=0
while i<len(n):
if n[i]>0:
positivos.append(n[i])
i+=1
return positivos | [
"[email protected]"
]
| |
ec80c963f307638e5a6e9a96460f5ff51ef02556 | 81b438781ecc307225fcc6141238f8a8ef03bd64 | /Project/src/Modules/House/Hvac/_test/test_hvac.py | 8953a34e781e131dba4abbe5483197e7479b8deb | []
| permissive | DBrianKimmel/PyHouse | b7d61be4dc6ce9e3332228a6c633e81fdfd8a908 | a100fc67761a22ae47ed6f21f3c9464e2de5d54f | refs/heads/develop | 2021-01-23T09:30:08.722975 | 2020-02-29T16:30:08 | 2020-02-29T16:30:08 | 4,125,178 | 3 | 1 | MIT | 2020-07-19T22:07:18 | 2012-04-24T13:53:33 | Python | UTF-8 | Python | false | false | 2,518 | py | """
@name: PyHouse/Project/src/Modules/Housing/Hvac/_test/test_hvac.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2015-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 12, 2015
@Summary:
Passed all 5 tests - DBK - 2019-06-04
"""
__updated__ = '2019-10-06'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.data_objects import ThermostatData
from Modules.Housing.Hvac.hvac import Api as hvacApi
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
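# NOTE: XML_LONG, XML_EMPTY and TESTING_PYHOUSE used below are XML test fixtures
# defined elsewhere in the PyHouse test suite; they are not imported in this snippet.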
class SetupMixin(object):
"""
"""
def setUp(self, p_root):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
self.m_xml = SetupPyHouseObj().BuildXml(p_root)
self.m_api = hvacApi(self.m_pyhouse_obj)
self.m_thermostat_obj = ThermostatData()
class A0(unittest.TestCase):
def setUp(self):
pass
def test_00_Print(self):
print('Id: test_hvac')
class A1_XML(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self, ET.fromstring(XML_LONG))
def test_01_Tags(self):
""" Test to be sure the compound object was built correctly - Rooms is an empty dict.
"""
# print(PrettyFormatAny.form(self.m_xml, 'A1-01-A - Tags'))
self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')
self.assertEqual(self.m_xml.hvac_sect.tag, 'HvacSection')
self.assertEqual(self.m_xml.thermostat_sect.tag, 'ThermostatSection')
self.assertEqual(self.m_xml.thermostat.tag, 'Thermostat')
def test_02_Load(self):
"""
"""
l_obj = self.m_api.LoadXml(self.m_pyhouse_obj)
# print(PrettyFormatAny.form(l_obj, 'A1-02-A - Thermostats', 105))
self.assertEqual(len(l_obj.Thermostats), 2)
class A2_EmptyXML(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self, ET.fromstring(XML_EMPTY))
def test_01_BuildObjects(self):
""" Test to be sure the compound object was built correctly - Rooms is an empty dict.
"""
self.assertEqual(self.m_pyhouse_obj.House.Rooms, {})
def test_02_Load(self):
"""
"""
l_obj = self.m_api.LoadXml(self.m_pyhouse_obj)
self.assertEqual(len(l_obj.Thermostats), 0)
# ## END DBK
| [
"[email protected]"
]
| |
037b3c1da16ce4174e61a9c521b97380a06d93ef | 34c57c605eba40b67e2de338c1e101a1c4cb6b72 | /nn_iris.py | c2a412be2309b194453bb81384e3c2e5fc81088d | []
| no_license | youngsoul/pyimagesearch-python-machine-learning | 9af38980e9e408855f4457de82fc8ffd1fd00837 | 1efeb3035efb24348489d36f8db551a395afd144 | refs/heads/master | 2023-07-19T18:27:00.158079 | 2021-01-06T01:52:32 | 2021-01-06T01:52:32 | 166,071,588 | 0 | 2 | null | 2023-07-06T22:29:48 | 2019-01-16T16:20:48 | Jupyter Notebook | UTF-8 | Python | false | false | 1,517 | py | # USAGE
# python nn_iris.py
# import the necessary packages
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from sklearn.datasets import load_iris
# load the Iris dataset and perform a training and testing split,
# using 75% of the data for training and 25% for evaluation
print("[INFO] loading data...")
dataset = load_iris()
(trainX, testX, trainY, testY) = train_test_split(dataset.data,
dataset.target, test_size=0.25, random_state=32)
# encode the labels as 1-hot vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
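# the binarizer is fit on the training labels only and then reused for the test labels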
# define the 4-3-3-3 architecture using Keras
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation="sigmoid"))
model.add(Dense(3, activation="sigmoid"))
model.add(Dense(3, activation="softmax"))
# train the model using SGD
print("[INFO] training network...")
opt = SGD(lr=0.1, momentum=0.9, decay=0.1 / 250)
model.compile(loss="categorical_crossentropy", optimizer=opt,
metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY),
epochs=250, batch_size=16)
# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=16)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1), target_names=dataset.target_names)) | [
"[email protected]"
]
| |
87ebecbb428b65919bd35b4d33ef229bda00f646 | 7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d | /packages/autorest.python/test/vanilla/legacy/Expected/AcceptanceTests/SecurityKeySwaggerCredentialFlag/securitykeyswaggercredentialflag/aio/__init__.py | 9ad4814fd5f1de73db95b78dbb4a74561e35acb3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | Azure/autorest.python | cc4bfbf91ae11535731cad37cedd6b733edf1ebd | a00d7aaa3753ef05cb5a0d38c664a90869478d44 | refs/heads/main | 2023-09-03T06:58:44.246200 | 2023-08-31T20:11:51 | 2023-08-31T20:11:51 | 100,315,955 | 47 | 40 | MIT | 2023-09-14T21:00:21 | 2017-08-14T22:58:33 | Python | UTF-8 | Python | false | false | 885 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._security_key_swagger_credential_flag import SecurityKeySwaggerCredentialFlag
try:
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"SecurityKeySwaggerCredentialFlag",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"[email protected]"
]
| |
f7cddd4b937b32d86f7d5852110289a46243c052 | aa06473d26ee952278eada3713c92114f317be73 | /aoc2020/11/main.py | 17c4b2dda92cccdcb61a22eb73c3b4938f213ce3 | []
| no_license | allefant/aoc | 1888b3434379dbee5faf982fcdcf7e4a61b2ca3c | 861421794ac0b57c037a593776fb0dcb9458f1aa | refs/heads/master | 2023-01-28T12:18:36.992292 | 2020-12-11T01:13:24 | 2020-12-11T22:08:01 | 319,518,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | #!/usr/bin/env python3
import sys
import re
def step(m, w, h):
m2 = []
for y in range(h):
row = m[y]
row2 = []
for x in range(w):
occupied = 0
if row[x] != "B":
for j in range(-1, 2):
for i in range(-1, 2):
if m[y + j][x + i] == "#":
occupied += 1
if row[x] == "L":
if occupied == 0:
row2.append("#")
else:
row2.append(row[x])
elif row[x] == "#":
if occupied >= 5:
row2.append("L")
else:
row2.append(row[x])
else:
row2.append(row[x])
m2.append(row2)
return m2
def count(m, w, h, what):
c = 0
for y in range(h):
row = m[y]
c += row.count(what)
return c
def part1(input):
m0 = []
for row_ in open(input):
row = row_.strip()
m0.append(row)
h = len(m0)
w = len(m0[0])
print(w, h)
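    # pad the grid with a one-cell border of "B" (blocker) cells so the neighbour
    # lookups never index out of bounds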
m = []
m.append("B" * (w + 2))
for i in range(h):
m.append("B" + m0[i] + "B")
m.append("B" * (w + 2))
w += 2
h += 2
while True:
m2 = step(m, w, h)
if m == m2:
print(count(m, w, h, "#"))
break
m = m2
def step2(m, w, h):
m2 = []
for y in range(h):
row = m[y]
row2 = []
for x in range(w):
occupied = 0
if row[x] != "B":
for j in range(-1, 2):
for i in range(-1, 2):
if i == 0 and j == 0: continue
dx = 0
dy = 0
while True:
dx += i
dy += j
if m[y + dy][x + dx] == ".":
continue
if m[y + dy][x + dx] == "#":
occupied += 1
break
if row[x] == "L":
if occupied == 0:
row2.append("#")
else:
row2.append(row[x])
elif row[x] == "#":
if occupied >= 5:
row2.append("L")
else:
row2.append(row[x])
else:
row2.append(row[x])
m2.append(row2)
return m2
def part2(input):
m0 = []
for row_ in open(input):
row = row_.strip()
m0.append(row)
h = len(m0)
w = len(m0[0])
print(w, h)
m = []
m.append("B" * (w + 2))
for i in range(h):
m.append("B" + m0[i] + "B")
m.append("B" * (w + 2))
w += 2
h += 2
while True:
m2 = step2(m, w, h)
if m == m2:
print(count(m, w, h, "#"))
break
m = m2
if __name__ == "__main__":
if sys.argv[1] == "1": part1(sys.argv[2])
if sys.argv[1] == "2": part2(sys.argv[2])
| [
"[email protected]"
]
| |
c3b05651a69dbc6cf08cd1913dbb82a5cc28b69f | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /repeatedSubstringPattern.py | b817bc879b45fdd26baf685fb791f702126c8410 | []
| no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py |
class Solution:
def repeatedSubstringPattern(self, s: str) -> bool:
return (s+s).find(s,1)!=len(s)
s='aba'
sl=Solution()
print(sl.repeatedSubstringPattern(s))
| [
"[email protected]"
]
| |
3e84cc5ac7365f662154cb0e841b9c5e7e17f70d | 29a145a353fe744a45ed07f92785909d5e9d4759 | /nets/densenet161.py | 07f5c8c8c67bb27d1a41472dac8af7839e112314 | [
"Apache-2.0"
]
| permissive | ahmdtaha/FineGrainedVisualRecognition | b38fd30ca61bd33e41f8e327d45b712611751288 | 7f0dc7f92f5baaa1bdfc028e4dd4d7459d2270f0 | refs/heads/master | 2021-10-11T12:25:16.945822 | 2021-10-09T23:20:07 | 2021-10-09T23:20:07 | 148,684,550 | 22 | 7 | null | null | null | null | UTF-8 | Python | false | false | 14,926 | py | # Copyright 2016 pudae. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the DenseNet architecture.
As described in https://arxiv.org/abs/1608.06993.
Densely Connected Convolutional Networks
Gao Huang, Zhuang Liu, Kilian Q. Weinberger, Laurens van der Maaten
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
import constants as const
import nets.nn_utils as nn_utils
import utils.os_utils as os_utils
import os
@slim.add_arg_scope
def _global_avg_pool2d(inputs, data_format='NHWC', scope=None, outputs_collections=None):
with tf.variable_scope(scope, 'xx', [inputs]) as sc:
axis = [1, 2] if data_format == 'NHWC' else [2, 3]
net = tf.reduce_mean(inputs, axis=axis, keep_dims=True)
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
return net
@slim.add_arg_scope
def _conv(inputs, num_filters, kernel_size, stride=1, dropout_rate=None,
scope=None, outputs_collections=None):
with tf.variable_scope(scope, 'xx', [inputs]) as sc:
net = slim.batch_norm(inputs)
net = tf.nn.relu(net)
net = slim.conv2d(net, num_filters, kernel_size)
if dropout_rate:
            # tf.nn.dropout expects a keep probability, so convert the drop rate here
            net = tf.nn.dropout(net, keep_prob=1.0 - dropout_rate)
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
return net
@slim.add_arg_scope
def _conv_block(inputs, num_filters, data_format='NHWC', scope=None, outputs_collections=None):
with tf.variable_scope(scope, 'conv_blockx', [inputs]) as sc:
net = inputs
net = _conv(net, num_filters*4, 1, scope='x1')
net = _conv(net, num_filters, 3, scope='x2')
if data_format == 'NHWC':
net = tf.concat([inputs, net], axis=3)
else: # "NCHW"
net = tf.concat([inputs, net], axis=1)
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
return net
@slim.add_arg_scope
def _dense_block(inputs, num_layers, num_filters, growth_rate,
grow_num_filters=True, scope=None, outputs_collections=None):
with tf.variable_scope(scope, 'dense_blockx', [inputs]) as sc:
net = inputs
for i in range(num_layers):
branch = i + 1
net = _conv_block(net, growth_rate, scope='conv_block'+str(branch))
if grow_num_filters:
num_filters += growth_rate
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
return net, num_filters
@slim.add_arg_scope
def _transition_block(inputs, num_filters, compression=1.0,
scope=None, outputs_collections=None):
num_filters = int(num_filters * compression)
with tf.variable_scope(scope, 'transition_blockx', [inputs]) as sc:
net = inputs
net = _conv(net, num_filters, 1, scope='blk')
net = slim.avg_pool2d(net, 2)
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
return net, num_filters
def densenet(inputs,
num_classes=1000,
reduction=None,
growth_rate=None,
num_filters=None,
num_layers=None,
dropout_rate=None,
data_format='NHWC',
is_training=True,
reuse=None,
scope=None):
assert reduction is not None
assert growth_rate is not None
assert num_filters is not None
assert num_layers is not None
compression = 1.0 - reduction
num_dense_blocks = len(num_layers)
if data_format == 'NCHW':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
with tf.variable_scope(scope, 'densenetxxx', [inputs, num_classes],
reuse=reuse) as sc:
end_points_collection = sc.name + '_end_points'
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training), \
slim.arg_scope([slim.conv2d, _conv, _conv_block,
_dense_block, _transition_block],
outputs_collections=end_points_collection), \
slim.arg_scope([_conv], dropout_rate=dropout_rate):
net = inputs
# initial convolution
net = slim.conv2d(net, num_filters, 7, stride=2, scope='conv1')
net = slim.batch_norm(net)
net = tf.nn.relu(net)
net = slim.max_pool2d(net, 3, stride=2, padding='SAME')
# blocks
for i in range(num_dense_blocks - 1):
# dense blocks
net, num_filters = _dense_block(net, num_layers[i], num_filters,
growth_rate,
scope='dense_block' + str(i+1))
# Add transition_block
net, num_filters = _transition_block(net, num_filters,
compression=compression,
scope='transition_block' + str(i+1))
net, num_filters = _dense_block(
net, num_layers[-1], num_filters,
growth_rate,
scope='dense_block' + str(num_dense_blocks))
# final blocks
with tf.variable_scope('final_block', [inputs]):
net = slim.batch_norm(net)
net = tf.nn.relu(net)
net = _global_avg_pool2d(net, scope='global_avg_pool')
net = slim.conv2d(net, num_classes, 1,
biases_initializer=tf.zeros_initializer(),
scope='logits')
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if num_classes is not None:
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
def densenet121(inputs, num_classes=1000, data_format='NHWC', is_training=True, reuse=None):
return densenet(inputs,
num_classes=num_classes,
reduction=0.5,
growth_rate=32,
num_filters=64,
num_layers=[6,12,24,16],
data_format=data_format,
is_training=is_training,
reuse=reuse,
scope='densenet121')
densenet121.default_image_size = 224
def densenet161(inputs, num_classes=1000, data_format='NHWC', is_training=True, reuse=None):
return densenet(inputs,
num_classes=num_classes,
reduction=0.5,
growth_rate=48,
num_filters=96,
num_layers=[6,12,36,24],
data_format=data_format,
is_training=is_training,
reuse=reuse,
scope='densenet161')
densenet161.default_image_size = 224
def densenet169(inputs, num_classes=1000, data_format='NHWC', is_training=True, reuse=None):
return densenet(inputs,
num_classes=num_classes,
reduction=0.5,
growth_rate=32,
num_filters=64,
num_layers=[6,12,32,32],
data_format=data_format,
is_training=is_training,
reuse=reuse,
scope='densenet169')
densenet169.default_image_size = 224
def densenet_arg_scope(weight_decay=1e-4,
batch_norm_decay=0.999,
batch_norm_epsilon=1e-5,
data_format='NHWC'):
with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.avg_pool2d, slim.max_pool2d,
_conv_block, _global_avg_pool2d],
data_format=data_format):
with slim.arg_scope([slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=None,
biases_initializer=None):
with slim.arg_scope([slim.batch_norm],
scale=True,
decay=batch_norm_decay,
epsilon=batch_norm_epsilon) as scope:
return scope
class DenseNet161:
def var_2_train(self):
scopes = [scope.strip() for scope in 'densenet161/logits'.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
# print(variables_to_train)
return variables_to_train;
def load_model(self,save_model_dir,ckpt_file,sess,saver,is_finetuning=False):
if (os.path.exists(save_model_dir) and os_utils.chkpt_exists(save_model_dir)):
# Try to restore everything if possible
saver.restore(sess, ckpt_file)
print('Model Loaded Normally');
return 'Model Loaded Normally';
else:
print('Failed to Model Loaded Normally from ',ckpt_file);
if(is_finetuning):
exclusions = [scope.strip() for scope in 'global_step,densenet161/logits'.split(',')]
else:
exclusions = [scope.strip() for scope in '**'.split(',')]
variables_to_restore = []
for var in tf.contrib.slim.get_model_variables():
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
break
else:
variables_to_restore.append(var)
# print(variables_to_restore)
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(self.cfg.imagenet__weights_filepath, variables_to_restore,ignore_missing_vars=False)
init_fn(sess)
print('Some variables loaded from imagenet')
return 'Failed to Model Loaded Normally from '+ckpt_file
def __init__(self,cfg, weight_decay=0.0001, data_format='NHWC',is_training=False,reuse=None,
images_ph = None,
lbls_ph = None):
self.cfg = cfg
batch_size = None
num_classes = cfg.num_classes
if lbls_ph is not None:
self.gt_lbls = tf.reshape(lbls_ph,[-1,num_classes])
else:
self.gt_lbls = tf.placeholder(tf.int32, shape=(batch_size, num_classes), name='class_lbls')
self.augment_input = tf.placeholder(tf.bool, name='augment_input')
## Check whether to use placeholder for training (images_ph == None),
# or the caller training procedure already provide images dataset pipeline
if images_ph is not None:
## If training using images TF dataset pipeline, no need to do augmentation,
# just make sure the input is in the correct shape
## This alternative is more efficient because it avoid the discouraged TF placeholder usage
self.input = images_ph
_, w, h, c = self.input.shape
aug_imgs = tf.reshape(self.input, [-1, w, h, 3])
else:
# If the input provide no images TF dataset pipeline
# Revert to the traditional placeholder usage
self.input = tf.placeholder(tf.float32, shape=(batch_size, const.max_frame_size, const.max_frame_size,
const.frame_channels), name='context_input')
## Training procedure controls whether to augment placeholder images
# using self.augment_input bool tensor
aug_imgs = tf.cond(self.augment_input,
lambda: nn_utils.augment(self.input, horizontal_flip=True, vertical_flip=False,
rotate=0, crop_probability=0, color_aug_probability=0)
, lambda: nn_utils.center_crop(self.input))
with tf.contrib.slim.arg_scope(densenet_arg_scope(weight_decay=weight_decay, data_format=data_format)):
nets, train_end_points = densenet(aug_imgs,
num_classes=num_classes,
reduction=0.5,
growth_rate=48,
num_filters=96,
num_layers=[6,12,36,24],
data_format=data_format,
is_training=True,
reuse=None,
scope='densenet161')
val_nets, val_end_points = densenet(aug_imgs,
num_classes=num_classes,
reduction=0.5,
growth_rate=48,
num_filters=96,
num_layers=[6, 12, 36, 24],
data_format=data_format,
is_training=False, ## Set is always to false
reuse=True,
scope='densenet161')
def cal_metrics(end_points):
gt = tf.argmax(self.gt_lbls, 1);
logits = tf.reshape(end_points['densenet161/logits'], [-1, num_classes])
pre_logits = end_points['densenet161/dense_block4']
center_supervised_cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.gt_lbls,
logits=logits,
name='xentropy_center')
loss = tf.reduce_mean(center_supervised_cross_entropy, name='xentropy_mean')
predictions = tf.reshape(end_points['predictions'], [-1, num_classes])
class_prediction = tf.argmax(predictions, 1)
supervised_correct_prediction = tf.equal(gt, class_prediction)
supervised_correct_prediction_cast = tf.cast(supervised_correct_prediction, tf.float32)
accuracy = tf.reduce_mean(supervised_correct_prediction_cast)
confusion_mat = tf.confusion_matrix(gt, class_prediction, num_classes=num_classes)
_, accumulated_accuracy = tf.metrics.accuracy(gt, class_prediction)
return loss,pre_logits,accuracy,confusion_mat,accumulated_accuracy
self.train_loss,self.train_pre_logits,self.train_accuracy,self.train_confusion_mat,self.train_accumulated_accuracy = cal_metrics(train_end_points);
self.val_loss,self.val_pre_logits,self.val_accuracy, self.val_confusion_mat, self.val_accumulated_accuracy = cal_metrics(val_end_points);
| [
"[email protected]"
]
| |
733761a83dcb9ee584718dff751838456c8c948a | 3cf41d1510239ce6987b878aabae1bcc8013fa33 | /account/migrations/0006_auto_20180815_0913.py | 6bb424fd8ef1354f07822ded3d51bf1a78465f34 | []
| no_license | djangogirlsbh/food | cc28bd23aff5593260aeab83b595cafc6ddf7d63 | bd809df79a373b33d12b489b3a5e468dc8cde4d5 | refs/heads/master | 2020-03-29T20:42:29.959148 | 2018-08-29T12:49:47 | 2018-08-29T12:49:47 | 150,326,438 | 0 | 1 | null | 2018-09-25T20:38:45 | 2018-09-25T20:38:44 | null | UTF-8 | Python | false | false | 3,541 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-15 09:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('account', '0005_auto_20180421_1237'),
]
operations = [
migrations.CreateModel(
name='BusinessUnit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='business unit name', max_length=100, unique=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='account.BusinessUnit')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='employee name', max_length=100, unique=True)),
('email', models.EmailField(max_length=254)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='employee', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Employment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('employment_date', models.DateField(null=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('businessunit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.BusinessUnit')),
('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Employee')),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reportees', to='account.Employment')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Position',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='employment',
name='position',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Position'),
),
]
| [
"[email protected]"
]
| |
30792d92890cd2cc84ffe53925265f4a6dd48b76 | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/classic_cv/common.py | af55d7f1421214a903c7b1ec094ed9a00b6797a6 | [
"MIT"
]
| permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | from rusentrel.classic.common import classic_common_callback_modification_func
CV_COUNT = 3
CV_NAME_PREFIX = u'cv_'
def classic_cv_common_callback_modification_func(callback):
"""
This function describes configuration setup for all model callbacks.
"""
classic_common_callback_modification_func(callback)
callback.set_cancellation_acc_bound(0.981)
callback.set_cancellation_f1_train_bound(0.85)
callback.set_key_save_hidden_parameters(False)
callback.set_key_stop_training_by_cost(True)
| [
"[email protected]"
]
| |
043fc7966bba39396a2d0d6a71cca52101550d54 | 17f3568e0be991636501970fb76c4c53a71ab38d | /opsgenie_sdk/api/alert/list_saved_searches_response_all_of.py | 54af2fc57517f94a7a9510294093791705444e21 | [
"Apache-2.0"
]
| permissive | jkinred/opsgenie-python-sdk | 7b79ed8c7518de117887e6b76a3fbb5800b94020 | 69bbd671d2257c6c3ab2f3f113cb62bd1a941c02 | refs/heads/master | 2020-07-10T00:24:19.583708 | 2019-08-24T06:35:31 | 2019-08-24T06:35:31 | 204,118,572 | 0 | 0 | NOASSERTION | 2019-08-24T06:29:25 | 2019-08-24T06:29:24 | null | UTF-8 | Python | false | false | 3,077 | py | # coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ListSavedSearchesResponseAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'list[SavedSearchMeta]'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""ListSavedSearchesResponseAllOf - a model defined in OpenAPI""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this ListSavedSearchesResponseAllOf. # noqa: E501
:return: The data of this ListSavedSearchesResponseAllOf. # noqa: E501
:rtype: list[SavedSearchMeta]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this ListSavedSearchesResponseAllOf.
:param data: The data of this ListSavedSearchesResponseAllOf. # noqa: E501
:type: list[SavedSearchMeta]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListSavedSearchesResponseAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
b512df2f51179c5b81503c7fb4d3ef456da692d3 | 7365f2410c139c5f4bf5ba0777ed0321322c92d9 | /python/二叉树中和为某一值的路径.py | 5c2c04737f7db1ddc05645e8c66ed1f70822e455 | []
| no_license | EvanJamesMG/Point-to-the-offer | 956a17a3c2a0d99a11428765f6af9f4ebbbe5fc3 | cc9b6b7572cf819f0e53a800899e1ebd9fd6cf9d | refs/heads/master | 2021-01-10T17:11:06.125860 | 2016-04-21T03:47:15 | 2016-04-21T03:47:15 | 52,489,364 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | # coding=utf-8
__author__ = 'EvanJames'
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
'''
Problem description
Given a binary tree and an integer, print all paths in the tree whose node values sum to the given integer.
A path is defined as the sequence of nodes visited from the root straight down to a leaf.
Approach: DFS
Pitfall: valuelist.append(1) mutates the list in place (and returns None), so tem = valuelist.append(1) is wrong;
to build a new list, write valuelist + [1] instead.
'''
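# A quick illustration of the pitfall mentioned above (added example, not part of the original solution):
#   >>> valuelist = [1, 2]
#   >>> valuelist.append(3)    # mutates in place, returns None
#   >>> valuelist + [4]        # builds a brand-new list, leaving valuelist untouched
#   [1, 2, 3, 4]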
class Solution:
    # Returns a 2D list; each inner list is one root-to-leaf path whose node values sum to expectNumber
def FindPath(self, root, expectNumber):
if root == None:
return []
self.res = []
self.DFS(root, expectNumber - root.val, [root.val])
return self.res
def DFS(self, root, expectNumber, valuelist):
if root.left == None and root.right == None and expectNumber == 0:
self.res.append(valuelist)
if root.left:
self.DFS(root.left, expectNumber - root.left.val, valuelist+[root.left.val])
if root.right:
self.DFS(root.right, expectNumber - root.right.val, valuelist+[root.right.val])
if __name__ == '__main__':
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
res = Solution().FindPath(root, 3)
print(res)
'''
The fucking Java code!
import java.util.ArrayList;
public class test {
public static void main(String[] args){
ArrayList<ArrayList<Integer>> res= new ArrayList<ArrayList<Integer>>();
TreeNode root = new TreeNode(1);
root.left = new TreeNode(2);
root.right = new TreeNode(3);
res= FindPath(root,4);
System.out.println(res);
}
public static ArrayList<ArrayList<Integer>> FindPath(TreeNode root,int target) {
ArrayList<ArrayList<Integer>> res= new ArrayList<ArrayList<Integer>>();
if(root == null)
return res;
ArrayList<Integer> valuelist = new ArrayList<Integer>() ;
valuelist.add(root.val);
DFS(root,target-root.val,valuelist,res);
return res;
}
private static void DFS(TreeNode root, int sum, ArrayList<Integer> valuelist, ArrayList<ArrayList<Integer>> res) {
// TODO Auto-generated method stub
if(root.left==null && root.right ==null && sum==0){
res.add(valuelist);
}
if(root.left!=null){
ArrayList<Integer> temlist = new ArrayList<Integer>(valuelist);
temlist.add(root.left.val);
DFS(root.left,sum-root.left.val,temlist,res);
}
if(root.right!=null){
ArrayList<Integer> temlist = new ArrayList<Integer>(valuelist);
temlist.add(root.right.val);
DFS(root.right,sum-root.right.val,temlist,res);
}
}
}
'''
| [
"[email protected]"
]
| |
75c97fc7b38cb2373276f35754ce6cf90e46de18 | 91863555b2bf1044a420c62a2f7e696724d5ca80 | /models/pointnet_cls.py | 12b466d2155e566df8fea15d3f7db5abf765ebc0 | []
| no_license | SIlvaMFPedro/pointcloud-networks | dd90f6767d7e5dcbffe6f719118450929ca06d91 | 1369d4f74e4f6e964465d6e39157031bd83aac97 | refs/heads/master | 2020-12-06T22:48:01.012743 | 2020-02-04T17:47:24 | 2020-02-04T17:47:24 | 232,573,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | # ------------------------
# IMPORTS
# ------------------------
# Import the necessary packages
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '..\\utils'))
import utils.tf_util
from models.transform_nets import input_transform_net, feature_transform_net
# ------------------------
# FUNCTIONS
# ------------------------
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=batch_size)
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
"""
Classification PointNet,
input is BxNx3,
output Bx40
"""
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
net = utils.tf_util.conv2d(input_image, 64, [1, 3], padding='VALID', stride=[1, 1], bn=True,
is_training=is_training, scope='conv1', bn_decay=bn_decay)
net = utils.tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1], bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
net_transformed = tf.expand_dims(net_transformed, [2])
net = utils.tf_util.conv2d(net_transformed, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True,
is_training=is_training, scope='conv3', bn_decay=bn_decay)
net = utils.tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = utils.tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
# Symmetric function: max pooling
net = utils.tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
net = tf.reshape(net, [batch_size, -1])
net = utils.tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = utils.tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
net = utils.tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = utils.tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
net = utils.tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
"""
pred: B*NUM_CLASSES,
label: B,
"""
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
# Enforce the transformation as orthogonal matrix
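    # (Added note) Concretely, this term is (1/2) * ||A A^T - I||_F^2 for the predicted KxK feature
    # transform A (tf.nn.l2_loss halves the squared Frobenius norm), weighted by reg_weight below.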
transform = end_points['transform'] # BxKxK
K = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf.summary.scalar('mat loss', mat_diff_loss)
return classify_loss + mat_diff_loss * reg_weight
# ------------------------
# MAIN FUNCTION
# ------------------------
if __name__ == '__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32, 1024, 3))
outputs = get_model(inputs, tf.constant(True))
print(outputs)
| [
"[email protected]"
]
| |
037740a786e00e1013b215f7045a3547f1c59296 | 7d90d2ce27c6ee0af74391b09909edbd45fdc2f0 | /renix_py_api/api_gen/IsisPortRateConfig_Autogen.py | 1609c4903b9fb7ae49ff15d9a4f7e2266edacdc0 | []
| no_license | gaoxingyu-hub/54testframework-master-e284 | d7ea0d4a715b65c8652430e963a86b9522a7237a | 57dd2197e7d91b8ad8fb2bd0e3503f10afa08544 | refs/heads/master | 2023-04-30T05:50:41.542402 | 2021-05-28T09:19:37 | 2021-05-28T09:19:37 | 309,922,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | """
Auto-generated File
Create Time: 2019-12-27 02:33:27
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .ROMObject_Autogen import ROMObject
@rom_manager.rom
class IsisPortRateConfig(ROMObject):
def __init__(self, UpdateRoutesTransmitRate=None, **kwargs):
self._UpdateRoutesTransmitRate = UpdateRoutesTransmitRate # IS-IS Tx Hello Rate(messages/second)
properties = kwargs.copy()
if UpdateRoutesTransmitRate is not None:
properties['UpdateRoutesTransmitRate'] = UpdateRoutesTransmitRate
# call base class function, and it will send message to renix server to create a class.
super(IsisPortRateConfig, self).__init__(**properties)
def delete(self):
"""
call to delete itself
"""
return self._finalize()
def edit(self, UpdateRoutesTransmitRate=None, **kwargs):
properties = kwargs.copy()
if UpdateRoutesTransmitRate is not None:
self._UpdateRoutesTransmitRate = UpdateRoutesTransmitRate
properties['UpdateRoutesTransmitRate'] = UpdateRoutesTransmitRate
super(IsisPortRateConfig, self).edit(**properties)
@property
def UpdateRoutesTransmitRate(self):
"""
get the value of property _UpdateRoutesTransmitRate
"""
if self.force_auto_sync:
self.get('UpdateRoutesTransmitRate')
return self._UpdateRoutesTransmitRate
@UpdateRoutesTransmitRate.setter
def UpdateRoutesTransmitRate(self, value):
self._UpdateRoutesTransmitRate = value
self.edit(UpdateRoutesTransmitRate=value)
def _set_updateroutestransmitrate_with_str(self, value):
try:
self._UpdateRoutesTransmitRate = int(value)
except ValueError:
self._UpdateRoutesTransmitRate = hex(int(value, 16))
| [
"[email protected]"
]
| |
df3841a72cd7271a9ca254939df9eaefafd0c708 | f397d08afdc45479dafe124f4664628b8c31e473 | /python/migen/nmigen/nmigen/test/test_hdl_ast.py | d1c5bb52a134e7c6d4e980c96880012485e16c59 | [
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | eiselekd/lang | 15e2110a0682f422d8cb424e100e3452b0ca00c6 | a35dc9490071ca55a2ea42ca52eb0b3a93ff0a7b | refs/heads/master | 2021-07-20T16:06:56.223511 | 2021-06-24T18:46:33 | 2021-06-24T18:46:33 | 95,143,543 | 0 | 0 | null | 2020-09-04T04:48:20 | 2017-06-22T18:10:55 | C | UTF-8 | Python | false | false | 18,069 | py | from ..hdl.ast import *
from .tools import *
class ValueTestCase(FHDLTestCase):
def test_wrap(self):
self.assertIsInstance(Value.wrap(0), Const)
self.assertIsInstance(Value.wrap(True), Const)
c = Const(0)
self.assertIs(Value.wrap(c), c)
with self.assertRaises(TypeError):
Value.wrap("str")
def test_bool(self):
with self.assertRaises(TypeError):
if Const(0):
pass
def test_len(self):
self.assertEqual(len(Const(10)), 4)
def test_getitem_int(self):
s1 = Const(10)[0]
self.assertIsInstance(s1, Slice)
self.assertEqual(s1.start, 0)
self.assertEqual(s1.end, 1)
s2 = Const(10)[-1]
self.assertIsInstance(s2, Slice)
self.assertEqual(s2.start, 3)
self.assertEqual(s2.end, 4)
with self.assertRaises(IndexError):
Const(10)[5]
def test_getitem_slice(self):
s1 = Const(10)[1:3]
self.assertIsInstance(s1, Slice)
self.assertEqual(s1.start, 1)
self.assertEqual(s1.end, 3)
s2 = Const(10)[1:-2]
self.assertIsInstance(s2, Slice)
self.assertEqual(s2.start, 1)
self.assertEqual(s2.end, 2)
s3 = Const(31)[::2]
self.assertIsInstance(s3, Cat)
self.assertIsInstance(s3.parts[0], Slice)
self.assertEqual(s3.parts[0].start, 0)
self.assertEqual(s3.parts[0].end, 1)
self.assertIsInstance(s3.parts[1], Slice)
self.assertEqual(s3.parts[1].start, 2)
self.assertEqual(s3.parts[1].end, 3)
self.assertIsInstance(s3.parts[2], Slice)
self.assertEqual(s3.parts[2].start, 4)
self.assertEqual(s3.parts[2].end, 5)
def test_getitem_wrong(self):
with self.assertRaises(TypeError):
Const(31)["str"]
class ConstTestCase(FHDLTestCase):
def test_shape(self):
self.assertEqual(Const(0).shape(), (1, False))
self.assertEqual(Const(1).shape(), (1, False))
self.assertEqual(Const(10).shape(), (4, False))
self.assertEqual(Const(-10).shape(), (5, True))
self.assertEqual(Const(1, 4).shape(), (4, False))
self.assertEqual(Const(1, (4, True)).shape(), (4, True))
self.assertEqual(Const(0, (0, False)).shape(), (0, False))
def test_shape_bad(self):
with self.assertRaises(TypeError):
Const(1, -1)
def test_normalization(self):
self.assertEqual(Const(0b10110, (5, True)).value, -10)
def test_value(self):
self.assertEqual(Const(10).value, 10)
def test_repr(self):
self.assertEqual(repr(Const(10)), "(const 4'd10)")
self.assertEqual(repr(Const(-10)), "(const 5'sd-10)")
def test_hash(self):
with self.assertRaises(TypeError):
hash(Const(0))
class OperatorTestCase(FHDLTestCase):
def test_bool(self):
v = Const(0, 4).bool()
self.assertEqual(repr(v), "(b (const 4'd0))")
self.assertEqual(v.shape(), (1, False))
def test_invert(self):
v = ~Const(0, 4)
self.assertEqual(repr(v), "(~ (const 4'd0))")
self.assertEqual(v.shape(), (4, False))
def test_neg(self):
v1 = -Const(0, (4, False))
self.assertEqual(repr(v1), "(- (const 4'd0))")
self.assertEqual(v1.shape(), (5, True))
v2 = -Const(0, (4, True))
self.assertEqual(repr(v2), "(- (const 4'sd0))")
self.assertEqual(v2.shape(), (4, True))
def test_add(self):
v1 = Const(0, (4, False)) + Const(0, (6, False))
self.assertEqual(repr(v1), "(+ (const 4'd0) (const 6'd0))")
self.assertEqual(v1.shape(), (7, False))
v2 = Const(0, (4, True)) + Const(0, (6, True))
self.assertEqual(v2.shape(), (7, True))
v3 = Const(0, (4, True)) + Const(0, (4, False))
self.assertEqual(v3.shape(), (6, True))
v4 = Const(0, (4, False)) + Const(0, (4, True))
self.assertEqual(v4.shape(), (6, True))
v5 = 10 + Const(0, 4)
self.assertEqual(v5.shape(), (5, False))
def test_sub(self):
v1 = Const(0, (4, False)) - Const(0, (6, False))
self.assertEqual(repr(v1), "(- (const 4'd0) (const 6'd0))")
self.assertEqual(v1.shape(), (7, False))
v2 = Const(0, (4, True)) - Const(0, (6, True))
self.assertEqual(v2.shape(), (7, True))
v3 = Const(0, (4, True)) - Const(0, (4, False))
self.assertEqual(v3.shape(), (6, True))
v4 = Const(0, (4, False)) - Const(0, (4, True))
self.assertEqual(v4.shape(), (6, True))
v5 = 10 - Const(0, 4)
self.assertEqual(v5.shape(), (5, False))
def test_mul(self):
v1 = Const(0, (4, False)) * Const(0, (6, False))
self.assertEqual(repr(v1), "(* (const 4'd0) (const 6'd0))")
self.assertEqual(v1.shape(), (10, False))
v2 = Const(0, (4, True)) * Const(0, (6, True))
self.assertEqual(v2.shape(), (9, True))
v3 = Const(0, (4, True)) * Const(0, (4, False))
self.assertEqual(v3.shape(), (8, True))
v5 = 10 * Const(0, 4)
self.assertEqual(v5.shape(), (8, False))
def test_and(self):
v1 = Const(0, (4, False)) & Const(0, (6, False))
self.assertEqual(repr(v1), "(& (const 4'd0) (const 6'd0))")
self.assertEqual(v1.shape(), (6, False))
v2 = Const(0, (4, True)) & Const(0, (6, True))
self.assertEqual(v2.shape(), (6, True))
v3 = Const(0, (4, True)) & Const(0, (4, False))
self.assertEqual(v3.shape(), (5, True))
v4 = Const(0, (4, False)) & Const(0, (4, True))
self.assertEqual(v4.shape(), (5, True))
v5 = 10 & Const(0, 4)
self.assertEqual(v5.shape(), (4, False))
def test_or(self):
v1 = Const(0, (4, False)) | Const(0, (6, False))
self.assertEqual(repr(v1), "(| (const 4'd0) (const 6'd0))")
self.assertEqual(v1.shape(), (6, False))
v2 = Const(0, (4, True)) | Const(0, (6, True))
self.assertEqual(v2.shape(), (6, True))
v3 = Const(0, (4, True)) | Const(0, (4, False))
self.assertEqual(v3.shape(), (5, True))
v4 = Const(0, (4, False)) | Const(0, (4, True))
self.assertEqual(v4.shape(), (5, True))
v5 = 10 | Const(0, 4)
self.assertEqual(v5.shape(), (4, False))
def test_xor(self):
v1 = Const(0, (4, False)) ^ Const(0, (6, False))
self.assertEqual(repr(v1), "(^ (const 4'd0) (const 6'd0))")
self.assertEqual(v1.shape(), (6, False))
v2 = Const(0, (4, True)) ^ Const(0, (6, True))
self.assertEqual(v2.shape(), (6, True))
v3 = Const(0, (4, True)) ^ Const(0, (4, False))
self.assertEqual(v3.shape(), (5, True))
v4 = Const(0, (4, False)) ^ Const(0, (4, True))
self.assertEqual(v4.shape(), (5, True))
v5 = 10 ^ Const(0, 4)
self.assertEqual(v5.shape(), (4, False))
def test_shl(self):
v1 = Const(1, 4) << Const(4)
self.assertEqual(repr(v1), "(<< (const 4'd1) (const 3'd4))")
self.assertEqual(v1.shape(), (11, False))
v2 = Const(1, 4) << Const(-3)
self.assertEqual(v2.shape(), (7, False))
def test_shr(self):
v1 = Const(1, 4) >> Const(4)
self.assertEqual(repr(v1), "(>> (const 4'd1) (const 3'd4))")
self.assertEqual(v1.shape(), (4, False))
v2 = Const(1, 4) >> Const(-3)
self.assertEqual(v2.shape(), (8, False))
def test_lt(self):
v = Const(0, 4) < Const(0, 6)
self.assertEqual(repr(v), "(< (const 4'd0) (const 6'd0))")
self.assertEqual(v.shape(), (1, False))
def test_le(self):
v = Const(0, 4) <= Const(0, 6)
self.assertEqual(repr(v), "(<= (const 4'd0) (const 6'd0))")
self.assertEqual(v.shape(), (1, False))
def test_gt(self):
v = Const(0, 4) > Const(0, 6)
self.assertEqual(repr(v), "(> (const 4'd0) (const 6'd0))")
self.assertEqual(v.shape(), (1, False))
def test_ge(self):
v = Const(0, 4) >= Const(0, 6)
self.assertEqual(repr(v), "(>= (const 4'd0) (const 6'd0))")
self.assertEqual(v.shape(), (1, False))
def test_eq(self):
v = Const(0, 4) == Const(0, 6)
self.assertEqual(repr(v), "(== (const 4'd0) (const 6'd0))")
self.assertEqual(v.shape(), (1, False))
def test_ne(self):
v = Const(0, 4) != Const(0, 6)
self.assertEqual(repr(v), "(!= (const 4'd0) (const 6'd0))")
self.assertEqual(v.shape(), (1, False))
def test_mux(self):
s = Const(0)
v1 = Mux(s, Const(0, (4, False)), Const(0, (6, False)))
self.assertEqual(repr(v1), "(m (const 1'd0) (const 4'd0) (const 6'd0))")
self.assertEqual(v1.shape(), (6, False))
v2 = Mux(s, Const(0, (4, True)), Const(0, (6, True)))
self.assertEqual(v2.shape(), (6, True))
v3 = Mux(s, Const(0, (4, True)), Const(0, (4, False)))
self.assertEqual(v3.shape(), (5, True))
v4 = Mux(s, Const(0, (4, False)), Const(0, (4, True)))
self.assertEqual(v4.shape(), (5, True))
def test_bool(self):
v = Const(0).bool()
self.assertEqual(repr(v), "(b (const 1'd0))")
self.assertEqual(v.shape(), (1, False))
def test_hash(self):
with self.assertRaises(TypeError):
hash(Const(0) + Const(0))
class SliceTestCase(FHDLTestCase):
def test_shape(self):
s1 = Const(10)[2]
self.assertEqual(s1.shape(), (1, False))
s2 = Const(-10)[0:2]
self.assertEqual(s2.shape(), (2, False))
def test_start_end_negative(self):
c = Const(0, 8)
s1 = Slice(c, 0, -1)
self.assertEqual((s1.start, s1.end), (0, 7))
s1 = Slice(c, -4, -1)
self.assertEqual((s1.start, s1.end), (4, 7))
def test_start_end_wrong(self):
with self.assertRaises(TypeError):
Slice(0, "x", 1)
with self.assertRaises(TypeError):
Slice(0, 1, "x")
def test_start_end_out_of_range(self):
c = Const(0, 8)
with self.assertRaises(IndexError):
Slice(c, 10, 12)
with self.assertRaises(IndexError):
Slice(c, 0, 12)
with self.assertRaises(IndexError):
Slice(c, 4, 2)
def test_repr(self):
s1 = Const(10)[2]
self.assertEqual(repr(s1), "(slice (const 4'd10) 2:3)")
class PartTestCase(FHDLTestCase):
def setUp(self):
self.c = Const(0, 8)
self.s = Signal(max=self.c.nbits)
def test_shape(self):
s1 = self.c.part(self.s, 2)
self.assertEqual(s1.shape(), (2, False))
s2 = self.c.part(self.s, 0)
self.assertEqual(s2.shape(), (0, False))
def test_width_bad(self):
with self.assertRaises(TypeError):
self.c.part(self.s, -1)
def test_repr(self):
s = self.c.part(self.s, 2)
self.assertEqual(repr(s), "(part (const 8'd0) (sig s) 2)")
class CatTestCase(FHDLTestCase):
def test_shape(self):
c1 = Cat(Const(10))
self.assertEqual(c1.shape(), (4, False))
c2 = Cat(Const(10), Const(1))
self.assertEqual(c2.shape(), (5, False))
c3 = Cat(Const(10), Const(1), Const(0))
self.assertEqual(c3.shape(), (6, False))
def test_repr(self):
c1 = Cat(Const(10), Const(1))
self.assertEqual(repr(c1), "(cat (const 4'd10) (const 1'd1))")
class ReplTestCase(FHDLTestCase):
def test_shape(self):
s1 = Repl(Const(10), 3)
self.assertEqual(s1.shape(), (12, False))
s2 = Repl(Const(10), 0)
self.assertEqual(s2.shape(), (0, False))
def test_count_wrong(self):
with self.assertRaises(TypeError):
Repl(Const(10), -1)
with self.assertRaises(TypeError):
Repl(Const(10), "str")
def test_repr(self):
s = Repl(Const(10), 3)
self.assertEqual(repr(s), "(repl (const 4'd10) 3)")
class ArrayTestCase(FHDLTestCase):
def test_acts_like_array(self):
a = Array([1,2,3])
self.assertSequenceEqual(a, [1,2,3])
self.assertEqual(a[1], 2)
a[1] = 4
self.assertSequenceEqual(a, [1,4,3])
del a[1]
self.assertSequenceEqual(a, [1,3])
a.insert(1, 2)
self.assertSequenceEqual(a, [1,2,3])
def test_becomes_immutable(self):
a = Array([1,2,3])
s1 = Signal(max=len(a))
s2 = Signal(max=len(a))
v1 = a[s1]
v2 = a[s2]
with self.assertRaisesRegex(ValueError,
regex=r"^Array can no longer be mutated after it was indexed with a value at "):
a[1] = 2
with self.assertRaisesRegex(ValueError,
regex=r"^Array can no longer be mutated after it was indexed with a value at "):
del a[1]
with self.assertRaisesRegex(ValueError,
regex=r"^Array can no longer be mutated after it was indexed with a value at "):
a.insert(1, 2)
def test_repr(self):
a = Array([1,2,3])
self.assertEqual(repr(a), "(array mutable [1, 2, 3])")
s = Signal(max=len(a))
v = a[s]
self.assertEqual(repr(a), "(array [1, 2, 3])")
class ArrayProxyTestCase(FHDLTestCase):
def test_index_shape(self):
m = Array(Array(x * y for y in range(1, 4)) for x in range(1, 4))
a = Signal(max=3)
b = Signal(max=3)
v = m[a][b]
self.assertEqual(v.shape(), (4, False))
def test_attr_shape(self):
from collections import namedtuple
pair = namedtuple("pair", ("p", "n"))
a = Array(pair(i, -i) for i in range(10))
s = Signal(max=len(a))
v = a[s]
self.assertEqual(v.p.shape(), (4, False))
self.assertEqual(v.n.shape(), (6, True))
def test_repr(self):
a = Array([1, 2, 3])
s = Signal(max=3)
v = a[s]
self.assertEqual(repr(v), "(proxy (array [1, 2, 3]) (sig s))")
class SignalTestCase(FHDLTestCase):
def test_shape(self):
s1 = Signal()
self.assertEqual(s1.shape(), (1, False))
s2 = Signal(2)
self.assertEqual(s2.shape(), (2, False))
s3 = Signal((2, False))
self.assertEqual(s3.shape(), (2, False))
s4 = Signal((2, True))
self.assertEqual(s4.shape(), (2, True))
s5 = Signal(max=16)
self.assertEqual(s5.shape(), (4, False))
s6 = Signal(min=4, max=16)
self.assertEqual(s6.shape(), (4, False))
s7 = Signal(min=-4, max=16)
self.assertEqual(s7.shape(), (5, True))
s8 = Signal(min=-20, max=16)
self.assertEqual(s8.shape(), (6, True))
s9 = Signal(0)
self.assertEqual(s9.shape(), (0, False))
def test_shape_bad(self):
with self.assertRaises(ValueError):
Signal(min=10, max=4)
with self.assertRaises(ValueError):
Signal(2, min=10)
with self.assertRaises(TypeError):
Signal(-10)
def test_name(self):
s1 = Signal()
self.assertEqual(s1.name, "s1")
s2 = Signal(name="sig")
self.assertEqual(s2.name, "sig")
def test_reset(self):
s1 = Signal(4, reset=0b111, reset_less=True)
self.assertEqual(s1.reset, 0b111)
self.assertEqual(s1.reset_less, True)
def test_attrs(self):
s1 = Signal()
self.assertEqual(s1.attrs, {})
s2 = Signal(attrs={"no_retiming": True})
self.assertEqual(s2.attrs, {"no_retiming": True})
def test_repr(self):
s1 = Signal()
self.assertEqual(repr(s1), "(sig s1)")
def test_like(self):
s1 = Signal.like(Signal(4))
self.assertEqual(s1.shape(), (4, False))
s2 = Signal.like(Signal(min=-15))
self.assertEqual(s2.shape(), (5, True))
s3 = Signal.like(Signal(4, reset=0b111, reset_less=True))
self.assertEqual(s3.reset, 0b111)
self.assertEqual(s3.reset_less, True)
s4 = Signal.like(Signal(attrs={"no_retiming": True}))
self.assertEqual(s4.attrs, {"no_retiming": True})
s5 = Signal.like(Signal(decoder=str))
self.assertEqual(s5.decoder, str)
s6 = Signal.like(10)
self.assertEqual(s6.shape(), (4, False))
s7 = [Signal.like(Signal(4))][0]
self.assertEqual(s7.name, "$like")
class ClockSignalTestCase(FHDLTestCase):
def test_domain(self):
s1 = ClockSignal()
self.assertEqual(s1.domain, "sync")
s2 = ClockSignal("pix")
self.assertEqual(s2.domain, "pix")
with self.assertRaises(TypeError):
ClockSignal(1)
def test_shape(self):
self.assertEqual(ClockSignal().shape(), (1, False))
def test_repr(self):
s1 = ClockSignal()
self.assertEqual(repr(s1), "(clk sync)")
class ResetSignalTestCase(FHDLTestCase):
def test_domain(self):
s1 = ResetSignal()
self.assertEqual(s1.domain, "sync")
s2 = ResetSignal("pix")
self.assertEqual(s2.domain, "pix")
with self.assertRaises(TypeError):
ResetSignal(1)
def test_shape(self):
self.assertEqual(ResetSignal().shape(), (1, False))
def test_repr(self):
s1 = ResetSignal()
self.assertEqual(repr(s1), "(rst sync)")
class SampleTestCase(FHDLTestCase):
def test_const(self):
s = Sample(1, 1, "sync")
self.assertEqual(s.shape(), (1, False))
def test_signal(self):
s1 = Sample(Signal(2), 1, "sync")
self.assertEqual(s1.shape(), (2, False))
s2 = Sample(ClockSignal(), 1, "sync")
s3 = Sample(ResetSignal(), 1, "sync")
def test_wrong_value_operator(self):
with self.assertRaises(TypeError,
"Sampled value may only be a signal or a constant, not "
"(+ (sig $signal) (const 1'd1))"):
Sample(Signal() + 1, 1, "sync")
def test_wrong_clocks_neg(self):
with self.assertRaises(ValueError,
"Cannot sample a value 1 cycles in the future"):
Sample(Signal(), -1, "sync")
| [
"[email protected]"
]
| |
25ee11f1d07c9473ad9c0c5898e66388e55da898 | 7b034caedfa49de09c3883401afa001ce234dea7 | /utils/queue_utils/output_job_queue.py | 411660283561f473f5ec4913ea5a30c2f548de20 | [
"MIT"
]
| permissive | Brown-University-Library/usep_gh_handler_app | 36cbc81d3233838ba0d511b27a050884d3b61baa | b271b8f26a3c27910445f1c0b55f9fbf6648865a | refs/heads/main | 2023-05-28T13:44:46.124344 | 2021-11-08T16:06:13 | 2021-11-08T16:06:13 | 19,741,222 | 0 | 2 | MIT | 2023-05-22T23:19:37 | 2014-05-13T14:00:38 | Python | UTF-8 | Python | false | false | 757 | py | # -*- coding: utf-8 -*-
import os, pprint
import redis, rq
queue_name = 'usep'
q = rq.Queue( queue_name, connection=redis.Redis() )
print( '- number of jobs in queue `%s`: %s' % (queue_name, len(q.jobs)) )
for job in q.jobs:
job_d = {
'_args': job._args,
'_kwargs': job._kwargs,
'_func_name': job._func_name,
'description': job.description,
'dt_created': job.created_at,
'dt_enqueued': job.enqueued_at,
'dt_ended': job.ended_at,
'origin': job.origin,
'id': job._id,
'traceback': job.exc_info,
'meta': job.meta,
'_result': job._result,
'_status': job._status,
}
print( '- job info...' )
pprint.pprint( job_d )
print( '---' )
| [
"[email protected]"
]
| |
b0960bc21f347c5f635065af13576341dc352f87 | 499a78ab760d0cd052acb3a3abd87e22b7075fc4 | /XOR/3_Favorite_byte.py | 036d42fc93345cdf565afb94dba0a751389db805 | []
| no_license | keithrozario/crypto_challenges | 5a4588db9c25ea25e86ef18d60ee144d40dec8b9 | 44083440d7d9713830a2d0854d1763eb82eb78cc | refs/heads/master | 2023-07-17T19:23:58.174041 | 2021-08-29T03:58:14 | 2021-08-29T03:58:14 | 391,328,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | """
single_byte_xor courtesy of
https://www.codementor.io/@arpitbhayani/deciphering-single-byte-xor-ciphertext-17mtwlzh30
"""
def single_byte_xor(text: bytes, key: int) -> bytes:
"""Given a plain text `text` as bytes and an encryption key `key` as a byte
in range [0, 256) the function encrypts the text by performing
XOR of all the bytes and the `key` and returns the resultant.
"""
return bytes([b ^ key for b in text])
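# (Added note) XOR with the same key is its own inverse (x ^ k ^ k == x), so the function above both
# encrypts and decrypts. With only 256 possible single-byte keys, the loop below brute-forces them
# all and keeps the candidate whose decoded plaintext starts with "crypto".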
data = "73626960647f6b206821204f21254f7d694f7624662065622127234f726927756d"
print(bytes.fromhex(data))
for x in range(256):
decrypted_text = single_byte_xor(
text=bytes.fromhex(data),
key=x
)
try:
if decrypted_text.decode('utf-8')[:6] == "crypto":
print(f"key:{x}, decrypted: {decrypted_text}")
except UnicodeDecodeError:
pass
| [
"[email protected]"
]
| |
ad47c84e3d814504a9b83adc133a2ed4f63c124d | c676bf5e77ba43639faa6f17646245f9d55d8687 | /tests/st/ops/gpu/test_reciprocal_op.py | fb422a94cfeced98b8012bf36b72af6c9cc3b0ce | [
"Apache-2.0",
"BSD-3-Clause-Open-MPI",
"MPL-2.0-no-copyleft-exception",
"LGPL-2.1-only",
"BSD-3-Clause",
"MPL-2.0",
"MPL-1.0",
"Libpng",
"AGPL-3.0-only",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"MIT",
"IJG",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"GPL-2.0-only",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
]
| permissive | zhengnengjin/mindspore | 1e2644e311f54a8bd17010180198a46499e9c88f | 544b859bb5f46611882749088b44c5aebae0fba1 | refs/heads/master | 2022-05-13T05:34:21.658335 | 2020-04-28T06:39:53 | 2020-04-28T06:39:53 | 259,522,589 | 2 | 0 | Apache-2.0 | 2020-04-28T03:35:33 | 2020-04-28T03:35:33 | null | UTF-8 | Python | false | false | 2,302 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context
class NetReciprocal(nn.Cell):
def __init__(self):
super(NetReciprocal, self).__init__()
self.reciprocal = P.Reciprocal()
def construct(self, x):
return self.reciprocal(x)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_Reciprocal():
x0_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32)
x1_np = np.random.uniform(-2, 2, 1).astype(np.float32)
x0 = Tensor(x0_np)
x1 = Tensor(x1_np)
expect0 = np.reciprocal(x0_np)
error0 = np.ones(shape=expect0.shape) * 1.0e-5
expect1 = np.reciprocal(x1_np)
error1 = np.ones(shape=expect1.shape) * 1.0e-5
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
reciprocal = NetReciprocal()
output0 = reciprocal(x0)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
output1 = reciprocal(x1)
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
reciprocal = NetReciprocal()
output0 = reciprocal(x0)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
output1 = reciprocal(x1)
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
| [
"[email protected]"
]
| |
5b266c3c679033a85b2f0ff641d253f9095b0cad | 7e5d7f35551e72cc98f3b8c10ec0dc4cfb032d95 | /python/tests/test_decay.py | 7affd6b2bd115c537b8e9602edd1a3da020fc616 | [
"Apache-2.0"
]
| permissive | ijindal/baseline | 3fdf7bbff483f8b5093f90f3c8b2eb0059cd67b2 | 2261abfb7e770cc6f3d63a7f6e0015238d0e11f8 | refs/heads/master | 2020-03-19T15:16:05.757374 | 2019-06-28T18:50:09 | 2019-06-28T18:50:09 | 136,663,537 | 0 | 3 | Apache-2.0 | 2019-07-10T13:16:02 | 2018-06-08T20:32:29 | Python | UTF-8 | Python | false | false | 5,407 | py | import six
import pytest
import numpy as np
from mock import patch, MagicMock
import baseline
from baseline.train import (
create_lr_scheduler,
CosineDecayScheduler,
CyclicLRScheduler,
ExponentialDecayScheduler,
WarmupLinearScheduler,
ConstantScheduler,
PiecewiseDecayScheduler,
ZarembaDecayScheduler,
InverseTimeDecayScheduler,
CompositeLRScheduler,
)
@pytest.fixture
def piecewise():
min_ = np.random.randint(1, 5)
max_ = np.random.randint(min_ + 2, min_ + 7)
bounds = [min_, max_]
vals = np.random.uniform(size=len(bounds) + 1)
return bounds, vals
def test_zaremba_with_nones():
eta = np.random.rand()
zd = ZarembaDecayScheduler(lr=eta)
for step in np.random.randint(0, 1000000, size=100):
assert zd(step) == eta
def test_piecewise_start(piecewise):
b, v = piecewise
p = PiecewiseDecayScheduler(b, v)
lr = p(0)
assert lr == v[0]
def test_piecewise_mid(piecewise):
b, v = piecewise
p = PiecewiseDecayScheduler(b, v)
step = np.random.randint(np.min(b) + 1, np.max(b))
lr = p(step)
assert lr == v[1]
def test_piecewise_last(piecewise):
b, v = piecewise
p = PiecewiseDecayScheduler(b, v)
step = np.random.randint(np.max(b) + 3, np.max(b) + 100)
lr = p(step)
assert lr == v[-1]
def test_staircase_decay_flat():
steps = np.random.randint(900, 1001)
sd = ExponentialDecayScheduler(steps, np.random.rand(), lr=np.random.rand(), staircase=True)
stair_one_one = sd(np.random.randint(steps - 100, steps))
stair_one_two = sd(np.random.randint(steps - 100, steps))
stair_two = sd(np.random.randint(steps + 1, steps + 10))
assert stair_one_one == stair_one_two
assert stair_one_two != stair_two
def test_staircase_value():
sd = ExponentialDecayScheduler(1000, 0.9, lr=1.0, staircase=True)
gold = 1.0
test = sd(100)
np.testing.assert_allclose(test, gold)
gold = 0.9
test = sd(1001)
np.testing.assert_allclose(test, gold)
def test_exp_values():
sd = ExponentialDecayScheduler(1000, 0.9, lr=1.0)
gold = 0.9895192582062144
test = sd(100)
np.testing.assert_allclose(test, gold)
gold = 0.8999051805311098
test = sd(1001)
np.testing.assert_allclose(test, gold)
def test_warmup_peaks():
steps = np.random.randint(100, 1000)
lr = np.random.rand()
wls = WarmupLinearScheduler(steps, lr=lr)
peak = wls(steps)
assert peak == lr
past = wls(steps + np.random.randint(100, 10000))
assert past == lr
def test_warmup_increases():
steps = np.random.randint(100, 1000)
lr = np.random.rand()
wls = WarmupLinearScheduler(steps, lr=lr)
lrs = [wls(s) for s in range(steps)]
last = -1
for lr in lrs:
assert lr > last
last = lr
def test_cyclic_lr():
bounds = 1000
min_eta = 1e-5
max_eta = 1e-2
clr = CyclicLRScheduler(max_eta, bounds, lr=min_eta)
start = clr(0)
up = clr(bounds / 2.)
mid = clr(bounds)
down = clr(bounds + (bounds / 2.))
end = clr(2 * bounds)
late = clr(3 * bounds)
assert start == min_eta
assert up > start
assert up < mid
assert mid == max_eta
assert down < mid
assert down > end
assert end == min_eta
assert late == max_eta
def test_cosine_lr():
cd = CosineDecayScheduler(1000, lr=0.1)
iters = [0, 100, 900, 1000, 1001]
golds = [0.1, 0.09755283, 0.002447176, 0.0, 0.0]
for i, gold in zip(iters, golds):
np.testing.assert_allclose(cd(i), gold, rtol=1e-6)
def test_constant_lr():
lr = np.random.rand()
lrs = ConstantScheduler(lr=lr)
for x in np.random.randint(0, 10000000, size=np.random.randint(100, 1000)):
assert lrs(x) == lr
def test_inverse_time_values():
eta = 1.0
steps = np.random.randint(1, 100)
ti = InverseTimeDecayScheduler(steps, 1.0, lr=eta)
for i in range(1, 5):
lr = ti(i * steps)
assert lr == eta / (i + 1)
def test_inverse_time_is_flat():
steps = np.random.randint(1, 100)
ti = InverseTimeDecayScheduler(steps, np.random.rand(), staircase=True, lr=np.random.rand())
before = steps - np.random.randint(1, steps)
after = steps + np.random.randint(1, steps)
after2 = steps + np.random.randint(1, steps)
lr_before = ti(before)
lr_after = ti(after)
lr_after2 = ti(after2)
assert lr_before != lr_after
assert lr_after == lr_after2
def test_composite_calls_warm():
warmup_steps = np.random.randint(50, 101)
warm = MagicMock()
warm.warmup_steps = warmup_steps
rest = MagicMock()
lr = CompositeLRScheduler(warm=warm, rest=rest)
step = np.random.randint(0, warmup_steps)
_ = lr(step)
warm.assert_called_once_with(step)
rest.assert_not_called()
def test_composite_calls_rest():
warmup_steps = np.random.randint(50, 101)
warm = MagicMock()
warm.warmup_steps = warmup_steps
rest = MagicMock()
lr = CompositeLRScheduler(warm=warm, rest=rest)
step = np.random.randint(warmup_steps + 1, six.MAXSIZE)
_ = lr(step)
warm.assert_not_called()
rest.assert_called_once_with(step - warmup_steps)
def test_composite_error():
pytest.importorskip('torch')
from baseline.pytorch.optz import CompositeLRSchedulerPyTorch
with pytest.raises(AssertionError):
_ = create_lr_scheduler(**{"lr_scheduler_type": ["exponential", "zaremba"]})
| [
"[email protected]"
]
| |
9b549beccb6cedac47258ab75915d34cdb08a1a2 | eb0b328aabcaea4c65f50776efacc7ffeb4f0f00 | /pages/migrations/0001_initial.py | 1c29a4a665a46ce318302e89489b20d335d60819 | []
| no_license | skiboorg/webtouch | 06dc03b614d7b51ff3ee330e5d60649f80cd903c | 32f16f78a4bb437c33c5363fa528bf2325bced7b | refs/heads/master | 2022-03-13T01:50:34.266663 | 2019-10-21T18:37:21 | 2019-10-21T18:37:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,232 | py | # Generated by Django 2.2.6 on 2019-10-05 12:49
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Filter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=100, verbose_name='Название фильтра')),
('name_slug', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'verbose_name': 'Фильтр',
'verbose_name_plural': 'Фильтры',
},
),
migrations.CreateModel(
name='PortfolioItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255, verbose_name='Название')),
('name_slug', models.CharField(blank=True, max_length=255, null=True)),
('image', models.ImageField(blank=True, upload_to='portfolio_img/', verbose_name='Изображение')),
('client', models.CharField(default='', max_length=255, verbose_name='Клиент')),
('date', models.CharField(default='', max_length=100, verbose_name='Дата')),
('url', models.CharField(default='', max_length=100, verbose_name='Ссылка')),
('wishes', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Пожелания')),
('technical', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Решения')),
('progressBarBackEnd', models.IntegerField(default=0, verbose_name='Прогресс-бар BackEnd')),
('progressBarFrontEnd', models.IntegerField(default=0, verbose_name='Прогресс-бар FrontEnd')),
('progressBarProduction', models.IntegerField(default=0, verbose_name='Прогресс-бар Production')),
('progressBarSEO', models.IntegerField(default=0, verbose_name='Прогресс-бар SEO')),
('is_active', models.BooleanField(db_index=True, default=True, verbose_name='Отображать ?')),
('created_at', models.DateTimeField(auto_now_add=True)),
('filter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='pages.Filter', verbose_name='Фильтр')),
],
),
migrations.CreateModel(
name='Status',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=100, verbose_name='Статус')),
('name_slug', models.CharField(blank=True, max_length=255, null=True)),
('color', models.CharField(default='#', max_length=100, verbose_name='Цвет в виде #000000')),
],
options={
'verbose_name': 'Статус',
'verbose_name_plural': 'Статусы',
},
),
migrations.CreateModel(
name='PortfolioItemImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='portfolio_img/', verbose_name='Картинка')),
('item', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.PortfolioItem', verbose_name='Кейс')),
],
),
migrations.AddField(
model_name='portfolioitem',
name='status',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='pages.Status', verbose_name='Текущий статус'),
),
]
| [
"[email protected]"
]
| |
2c41dced7d5f2643e2a0b5a13d1489e2dcfedae6 | 19da1a56f137a08772c347cf974be54e9c23c053 | /lib/adafruit_st7789.py | 48646518e9885771e3c8fc07206f09314fa59323 | []
| no_license | mk53202/mk53202-timeclock-pyportal | d94f45a9d186190a4bc6130077baa6743a816ef3 | 230a858d429f8197c00cab3e67dcfd3b295ffbe0 | refs/heads/master | 2021-02-04T05:38:25.533292 | 2020-02-27T22:45:56 | 2020-02-27T22:45:56 | 243,626,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | # The MIT License (MIT)
#
# Copyright (c) 2019 Melissa LeBlanc-Williams for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_st7789`
====================================================
Displayio driver for ST7789 based displays.
* Author(s): Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* Adafruit 1.54" 240x240 Wide Angle TFT LCD Display with MicroSD:
https://www.adafruit.com/product/3787
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import displayio
__version__ = "1.0.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ST7789.git"
_INIT_SEQUENCE = (
b"\x01\x80\x96" # _SWRESET and Delay 150ms
b"\x11\x80\xFF" # _SLPOUT and Delay 500ms
b"\x3A\x81\x55\x0A" # _COLMOD and Delay 10ms
b"\x36\x01\x08" # _MADCTL
b"\x21\x80\x0A" # _INVON Hack and Delay 10ms
b"\x13\x80\x0A" # _NORON and Delay 10ms
b"\x36\x01\xC0" # _MADCTL
b"\x29\x80\xFF" # _DISPON and Delay 500ms
)
# pylint: disable=too-few-public-methods
class ST7789(displayio.Display):
"""ST7789 driver"""
def __init__(self, bus, **kwargs):
super().__init__(bus, _INIT_SEQUENCE, **kwargs)
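
# Minimal usage sketch (kept as a comment so importing this module stays
# side-effect free). The SPI pins and FourWire wiring below are assumptions
# for a generic CircuitPython board, not part of this driver:
#
#   import board
#   import displayio
#   spi = board.SPI()
#   bus = displayio.FourWire(spi, command=board.D10, chip_select=board.D9, reset=board.D6)
#   display = ST7789(bus, width=240, height=240, rowstart=80)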
| [
"[email protected]"
]
| |
b441203e8ef873ce091b06bce5476c06a40a47c3 | 46349356d4812a6bf04a1dff4ee3311864f8b7ff | /ma_py/_main_plt_corr.py | 6cb586b4594b4b189e47762f33f86382e29e4968 | []
| no_license | alexdoberman/ma | 1ca9d20f64d0e8c87feff9f7bb04d09d3088aeb3 | 219e5e87b80c6a795c0d4161b3ad22b9973ed745 | refs/heads/master | 2022-07-17T13:15:21.672335 | 2020-05-12T15:10:40 | 2020-05-12T15:10:40 | 263,365,873 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | # -*- coding: utf-8 -*-
import numpy as np
import soundfile as sf
import matplotlib.pyplot as plt
def determine_lag(x, y, max_lag):
lags = []
for i in range(-max_lag, max_lag+1, 1):
corr = np.sum(x*np.roll(y, i))
lags.append((i, corr))
m = max(lags, key=lambda item:item[1])
# print (m)
shift_y = np.roll(y, m[0])
return m[0], shift_y
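
# Illustrative example of determine_lag (values are made up, not from the
# original script): for a signal and a delayed copy of it, the returned lag
# is the circular shift that best re-aligns the two.
#
#   x = np.array([0., 1., 0., 0., 0., 0.])
#   y = np.roll(x, 2)                        # y is x delayed by two samples
#   lag, y_aligned = determine_lag(x, y, max_lag=3)
#   # lag == -2 and np.allclose(y_aligned, x)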
if __name__ == '__main__':
ds_sp_path = r'D:\REP\svn_MicArrAlgorithm2\MA_PY\out\result_corr_null\ds_sp.wav'
ds_inf_path = r'D:\REP\svn_MicArrAlgorithm2\MA_PY\out\result_corr_null\ds_inf.wav'
# Load signal
x1, rate = sf.read(ds_sp_path)
x2, rate = sf.read(ds_inf_path)
lag, x2_shift = determine_lag(x1,x2, max_lag = 512)
# x1 = x1[:16000]
# x2_shift = x2_shift[:16000]
y = x1-x2_shift
plt.plot(y)
plt.show()
'''
corr1 = np.correlate(x1, x2, 'full')
corr2 = np.correlate(y1, y2, 'full')
print (corr1.shape)
plt.plot(corr1)
plt.plot(corr2)
plt.show()
'''
| [
"[email protected]"
]
| |
ff6ff4dd32a8e0b3c0eaf443ed4b9d02228659cb | 2c2dc695fc0e91a9662ae1de8e288447c347dcde | /ABC/ABC153/A.py | 4e4dcbc96819ad0073091a3f25e0cc7a2d998664 | []
| no_license | meniscus/AtCoder | 8b6409cf606a6f6449592c9a28ab0a8c4e758231 | 3af11aa2b0e7f89e5015974e8594bb541856a87f | refs/heads/master | 2020-04-04T18:34:22.668206 | 2020-03-25T08:06:59 | 2020-03-25T08:06:59 | 156,169,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | H,A = map(int,input().split())
c = H // A
if (H % A == 0) :
print(c)
else :
print(c+1) | [
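
# The same answer can be computed with the ceiling-division idiom:
#   print((H + A - 1) // A)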
"[email protected]"
]
| |
3fd23346262334fba48dee9f799388d525f000d3 | e9e6d21b802240944537298687f5327fca4390a1 | /biomass/models/nfkb_pathway/reaction_network.py | b995a39682410883e5f4cf070c547cb1749794c9 | [
"Apache-2.0"
]
| permissive | biomass-dev/biomass | dda8be0e4d481cf8d6378c5631443f625afe8804 | 2cc3ee62feab23d9224f82f0d15a3fed7c970a11 | refs/heads/master | 2023-08-03T04:42:33.192893 | 2023-06-20T10:03:27 | 2023-06-20T10:03:27 | 215,932,388 | 9 | 6 | Apache-2.0 | 2023-08-30T20:10:18 | 2019-10-18T03:16:39 | Python | UTF-8 | Python | false | false | 289 | py | from typing import Dict, List
class ReactionNetwork(object):
"""
Reaction indices grouped according to biological processes.
This is used for sensitivity analysis (target='reaction').
"""
def __init__(self) -> None:
self.reactions: Dict[str, List[int]] = {}
| [
"[email protected]"
]
| |
a07fe7f010b5e816c382596e87dd1d9a64e75a29 | ab08ed332d23aa5c098a67588676bf6752ff99b9 | /semantic_segmentation/cli_interface.py | a26688eb42539860c0d3f31c25209d9aeefb9068 | []
| no_license | Mulham91/Deep-Learning-based-Pixel-wise-Lesion-Segmentationon-Oral-Squamous-Cell-Carcinoma-Images | 9bffb448265da755220961081dc21f2ae97c8694 | 17cf7751825fb755fcf77eb2b41317965a1a8189 | refs/heads/master | 2023-01-15T11:25:19.816351 | 2020-11-12T11:04:32 | 2020-11-12T11:04:32 | 312,130,175 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,043 | py | #!/usr/bin/env python
import sys
import argparse
from train import train
from data_utils.data_loader import verify_segmentation_dataset
# Missing in the original file: the prediction helpers and the dataset
# visualizer used below. The module paths here are assumptions that mirror
# the two imports above; adjust them to the project's actual layout.
from predict import predict, predict_multiple, predict_video
from data_utils.visualize_dataset import visualize_segmentation_dataset
def train_action(command_parser):
parser = command_parser.add_parser('train')
parser.add_argument("--model_name", type=str, required=True)
parser.add_argument("--train_images", type=str, required=True)
parser.add_argument("--train_annotations", type=str, required=True)
parser.add_argument("--n_classes", type=int, required=True)
parser.add_argument("--input_height", type=int, default=None)
parser.add_argument("--input_width", type=int, default=None)
parser.add_argument('--not_verify_dataset', action='store_false')
parser.add_argument("--checkpoints_path", type=str, default=None)
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=2)
parser.add_argument('--validate', action='store_true')
parser.add_argument("--val_images", type=str, default="")
parser.add_argument("--val_annotations", type=str, default="")
parser.add_argument("--val_batch_size", type=int, default=2)
parser.add_argument("--load_weights", type=str, default=None)
parser.add_argument('--auto_resume_checkpoint', action='store_true')
parser.add_argument("--steps_per_epoch", type=int, default=512)
parser.add_argument("--optimizer_name", type=str, default="adam")
def action(args):
return train(model=args.model_name,
train_images=args.train_images,
train_annotations=args.train_annotations,
input_height=args.input_height,
input_width=args.input_width,
n_classes=args.n_classes,
verify_dataset=args.not_verify_dataset,
checkpoints_path=args.checkpoints_path,
epochs=args.epochs,
batch_size=args.batch_size,
validate=args.validate,
val_images=args.val_images,
val_annotations=args.val_annotations,
val_batch_size=args.val_batch_size,
auto_resume_checkpoint=args.auto_resume_checkpoint,
load_weights=args.load_weights,
steps_per_epoch=args.steps_per_epoch,
optimizer_name=args.optimizer_name)
parser.set_defaults(func=action)
def predict_action(command_parser):
parser = command_parser.add_parser('predict')
parser.add_argument("--checkpoints_path", type=str, required=True)
parser.add_argument("--input_path", type=str, default="", required=True)
parser.add_argument("--output_path", type=str, default="", required=True)
def action(args):
input_path_extension = args.input_path.split('.')[-1]
if input_path_extension in ['jpg', 'jpeg', 'png']:
return predict(inp=args.input_path, out_fname=args.output_path,
checkpoints_path=args.checkpoints_path)
else:
return predict_multiple(inp_dir=args.input_path,
out_dir=args.output_path,
checkpoints_path=args.checkpoints_path)
parser.set_defaults(func=action)
def predict_video_action(command_parser):
parser = command_parser.add_parser('predict_video')
parser.add_argument("--input", type=str, default=0, required=False)
parser.add_argument("--output_file", type=str, default="", required=False)
parser.add_argument("--checkpoints_path", required=True)
parser.add_argument("--display", action='store_true', required=False)
def action(args):
return predict_video(inp=args.input,
output=args.output_file,
checkpoints_path=args.checkpoints_path,
display=args.display,
)
parser.set_defaults(func=action)
def verify_dataset_action(command_parser):
parser = command_parser.add_parser('verify_dataset')
parser.add_argument("--images_path", type=str)
parser.add_argument("--segs_path", type=str)
parser.add_argument("--n_classes", type=int)
def action(args):
verify_segmentation_dataset(
args.images_path, args.segs_path, args.n_classes)
parser.set_defaults(func=action)
def visualize_dataset_action(command_parser):
    # Reconstructed wrapper: the original file kept only the inner action and
    # referenced an undefined `parser` here.
    parser = command_parser.add_parser('visualize_dataset')
    parser.add_argument("--images_path", type=str)
    parser.add_argument("--segs_path", type=str)
    parser.add_argument("--n_classes", type=int)
    parser.add_argument('--do_augment', action='store_true')

    def action(args):
        visualize_segmentation_dataset(args.images_path, args.segs_path,
                                       args.n_classes,
                                       do_augment=args.do_augment)
    parser.set_defaults(func=action)
def main():
assert len(sys.argv) >= 2, \
"python -m keras_segmentation <command> <arguments>"
main_parser = argparse.ArgumentParser()
command_parser = main_parser.add_subparsers()
# Add individual commands
train_action(command_parser)
verify_dataset_action(command_parser)
args = main_parser.parse_args()
args.func(args)
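
if __name__ == '__main__':
    # Entry-point guard (not present in the original file) so the CLI can be
    # invoked directly; model names and paths below are placeholders:
    #   python cli_interface.py train --model_name vgg_unet --train_images data/images/ \
    #       --train_annotations data/annotations/ --n_classes 2 --epochs 5
    #   python cli_interface.py verify_dataset --images_path data/images/ \
    #       --segs_path data/annotations/ --n_classes 2
    main()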
| [
"[email protected]"
]
| |
e24ab27b4f8c46be37dd4e0f1d28b3e80022a1e2 | 13ba35a1b41f56a6791f65ff06aa6a7c6a34b60a | /tests/tests/test_helpers.py | 92e262de402ec9360cf567d1ab20276edeb45599 | [
"BSD-3-Clause"
]
| permissive | dldevinc/django-spectrum | ea60b63feec313c87efc19effe31d455b243c99e | 66e69ace7d508219eb69aee3b1ed421df2bf3013 | refs/heads/main | 2023-09-04T04:52:48.530659 | 2023-08-18T05:40:23 | 2023-08-18T05:40:23 | 166,020,131 | 3 | 0 | BSD-3-Clause | 2023-08-18T05:15:10 | 2019-01-16T10:22:37 | Python | UTF-8 | Python | false | false | 8,064 | py | from decimal import Decimal
import pytest
from spectrum.exceptions import InvalidColorTypeError, InvalidColorValueError
from spectrum.helpers import (
format_color,
format_color_byte,
format_color_bytes,
format_hexa,
format_rgba,
fraction_to_color_byte,
re_hexa,
re_rgba,
)
class TestHexRegex:
def test_hex_rgb(self):
match = re_hexa.fullmatch("CB0")
assert match is not None
assert match.group(1) == "CB0"
match = re_hexa.fullmatch("#bd8")
assert match is not None
assert match.group(1) == "bd8"
def test_hex_rgba(self):
match = re_hexa.fullmatch("da88")
assert match is not None
assert match.group(1) == "da88"
match = re_hexa.fullmatch("#FF00")
assert match is not None
assert match.group(1) == "FF00"
def test_hex_rrggbb(self):
match = re_hexa.fullmatch("BACCEF")
assert match is not None
assert match.group(1) == "BACCEF"
match = re_hexa.fullmatch("#808080")
assert match is not None
assert match.group(1) == "808080"
def test_hex_rrggbbaa(self):
match = re_hexa.fullmatch("2fcb60ff")
assert match is not None
assert match.group(1) == "2fcb60ff"
match = re_hexa.fullmatch("#ba200060")
assert match is not None
assert match.group(1) == "ba200060"
class TestRGBRegex:
def test_rgb(self):
match = re_rgba.fullmatch("rgb(255, 255, 0)")
assert match is not None
assert match.groups() == ("255", "255", "0", None)
def test_rgba(self):
match = re_rgba.fullmatch("rgba(64, 128, 192, 0.5)")
assert match is not None
assert match.groups() == ("64", "128", "192", "0.5")
def test_rgba_new_notation(self):
match = re_rgba.fullmatch("rgba(64 128 192 / 52.5%)")
assert match is not None
assert match.groups() == ("64", "128", "192", "52.5%")
class TestFractionToColorByte:
def test_opaque(self):
assert fraction_to_color_byte(1) == 255
def test_transparent(self):
assert fraction_to_color_byte(0) == 0
def test_float(self):
assert fraction_to_color_byte(0.7) == 178
def test_string(self):
assert fraction_to_color_byte("0.7") == 179 # no precision loss
def test_decimal(self):
assert fraction_to_color_byte(Decimal("0.7")) == 179
class TestFormatColorByte:
def test_none(self):
with pytest.raises(TypeError):
format_color_byte(None)
def test_empty_string(self):
with pytest.raises(ValueError):
format_color_byte("")
def test_nondigit_string(self):
with pytest.raises(ValueError):
format_color_byte("FF")
    def test_string(self):
        assert format_color_byte("64") == 64
    def test_int(self):
        assert format_color_byte(64) == 64
    def test_float(self):
        with pytest.raises(TypeError):
            format_color_byte(64.5)
    def test_min_value(self):
        assert format_color_byte("0") == 0
    def test_max_value(self):
        assert format_color_byte("255") == 255
def test_below_bounds(self):
with pytest.raises(OverflowError):
format_color_byte("-1")
def test_above_bounds(self):
with pytest.raises(OverflowError):
format_color_byte("256")
class TestFormatColorBytes:
def test_insufficient_length(self):
with pytest.raises(OverflowError):
format_color_bytes([128, 192])
def test_excessive_length(self):
with pytest.raises(OverflowError):
format_color_bytes([128, 192, 64, 0, 128])
def test_below_bounds(self):
with pytest.raises(OverflowError):
format_color_bytes([0, -1, 0])
def test_above_bounds(self):
with pytest.raises(OverflowError):
format_color_bytes([0, 256, 0])
def test_non_numeric_value(self):
with pytest.raises(ValueError):
format_color_bytes([128, "abc", 64, 0, 128])
def test_opacity_added(self):
assert format_color_bytes([128, "92", 64]) == (128, 92, 64, 255)
def test_stability(self):
input = ["192", "128", "64"]
output = format_color_bytes(input)
assert format_color_bytes(output) == output == (192, 128, 64, 255)
class TestFormatRGBA:
def test_short(self):
assert format_rgba(["192", "128", "64"]) == (192, 128, 64, 255)
def test_transparent(self):
assert format_rgba(["192", "128", "64", "0.2"]) == (192, 128, 64, 51)
def test_opaque(self):
assert format_rgba([94, 72, 156]) == (94, 72, 156, 255)
assert format_rgba([94, 72, 156, 1]) == (94, 72, 156, 255)
def test_fraction_opacity(self):
assert format_rgba([92, 40, 128, 0.5]) == (92, 40, 128, 128)
def test_percentage(self):
assert format_rgba([92, 40, 128, '70%']) == (92, 40, 128, 179)
class TestFormatHEXA:
def test_rgb(self):
assert format_hexa("bda") == (187, 221, 170, 255)
def test_rgba(self):
assert format_hexa("4fcd") == (68, 255, 204, 221)
def test_rrggbb(self):
assert format_hexa("60B0C4") == (96, 176, 196, 255)
def test_rrggbbaa(self):
assert format_hexa("2BEA40D0") == (43, 234, 64, 208)
class TestFormatColor:
def test_short_hex(self):
assert format_color("aac") == (170, 170, 204, 255)
assert format_color("#da0") == (221, 170, 0, 255)
def test_short_hexa(self):
assert format_color("cde0") == (204, 221, 238, 0)
assert format_color("#ff08") == (255, 255, 0, 136)
def test_hex(self):
assert format_color("DDA0C4") == (221, 160, 196, 255)
assert format_color("#2F4BEF") == (47, 75, 239, 255)
def test_hexa(self):
assert format_color("C0B0D080") == (192, 176, 208, 128)
assert format_color("#4B6D321A") == (75, 109, 50, 26)
def test_rgb(self):
assert format_color("rgb(75, 109, 26)") == (75, 109, 26, 255)
def test_rgba(self):
assert format_color("rgba(98, 212, 204, 0.89)") == (98, 212, 204, 227)
def test_short_iterable(self):
assert format_color(["67", "120", "64"]) == (67, 120, 64, 255)
def test_iterable(self):
assert format_color([32, 64, 128, 72]) == (32, 64, 128, 72)
def test_insufficient_hex_length(self):
with pytest.raises(InvalidColorValueError):
format_color("FF")
def test_excessive_hex_length(self):
with pytest.raises(InvalidColorValueError):
format_color("FFAABBDDEE")
def test_non_hex(self):
with pytest.raises(InvalidColorValueError):
format_color("XYZ")
def test_insufficient_rgb_length(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, 192)")
def test_excessive_rgb_length(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(32, 64, 92, 128, 255)")
def test_rgb_negative(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, -32, 60)")
def test_rgb_overbound(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, 192, 999)")
def test_rgba_negative_opacity(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, 32, 60, -0.5)")
def test_rgba_opacity_overbound(self):
with pytest.raises(InvalidColorValueError):
format_color("rgba(128, 192, 0, 1.5)")
def test_insufficient_iterable_length(self):
with pytest.raises(InvalidColorValueError):
format_color([64, 128])
def test_excessive_iterable_length(self):
with pytest.raises(InvalidColorValueError):
format_color([128, 96, 48, 255, 255])
def test_invalid_type(self):
with pytest.raises(InvalidColorTypeError):
format_color(None)
with pytest.raises(InvalidColorTypeError):
format_color(192)
| [
"[email protected]"
]
| |
7cd515c70857edaa3a49a304f0e8bfa0bca864ea | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_w_M_to_Wxyz_focus_Z_ok_BN/pyr_Tcrop256_pad20_jit15/pyr_2s/L8/step10_a.py | a7aa31fc7e2b5140b5965eb442adde91a9cc83d0 | []
| no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,703 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the step10_b.py currently being executed
code_exe_path_element = code_exe_path.split("\\") ### split the path; used below to find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] was meant to strip the leading "step1x_"; later a meaningful prefix seemed worth keeping, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] was meant to strip the leading "mask_", added only because a Python module name cannot start with a digit; the automatic ordering turned out to be acceptable, so it was changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_2side_L8 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder

import time
start_time = time.time()  ### "build exps cost time" in __main__ uses time/start_time; they were not defined anywhere in the original file, so they are added here
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder one level above result_dir; a nested exp_dir is fine too.
For example, with exp_dir = "6_mask_unet/name_you_choose", every result_dir ends up under:
6_mask_unet/name_you_choose/result_a
6_mask_unet/name_you_choose/result_b
6_mask_unet/name_you_choose/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_W_ch_norm_v2
use_loss_obj = [G_mae_s001_loss_info_builder.set_loss_target("UNet_z").copy(), G_mae_s001_loss_info_builder.set_loss_target("UNet_y").copy(), G_mae_s001_loss_info_builder.set_loss_target("UNet_x").copy()] ### the z, y, x order follows step07_b_0b_Multi_UNet
batch_size = 10
#############################################################
### Build an empty Exp_builder so that result_analyze can draw blank placeholder plots
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_7__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_7__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_7__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_3.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_7__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_4.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_7__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_5.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_7__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_6.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_7__2side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_7.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_3.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_4.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_5.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_6.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_1.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_2.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_3.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_4.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_5.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_6.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_7, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_7.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_8 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_8, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_8.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_9__2side_9 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_9__2side_9, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_9__2side_9.kong_model.model_describe) .set_train_args(epochs= 1, batch_size=batch_size) .set_train_iter_args(it_see_fq=900 // batch_size, it_save_fq=(900 * 2) // batch_size, it_down_step="half", it_down_fq=900 // batch_size).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### This branch runs when you press F5 directly or run `python step10_b1_exp_obj_load_and_train_and_test.py` with no extra argument, so execution never reaches the subprocess code below (used by step10_b_subprocss.py)
        ch032_1side_1__2side_1.build().run()  ### the original referenced ch032_1side_1__2side_0, which is not defined in this file
# print('no argument')
sys.exit()
    ### The line below is for step10_b_subprocess.py; it is equivalent to running `python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()` from cmd
eval(sys.argv[1])
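
    # Illustrative invocation of the branch above (the exp name is one of the
    # builders defined earlier in this file):
    #   python step10_a.py "ch032_1side_3__2side_2.build().run()"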
| [
"[email protected]"
]
| |
0ddceaa4205fd6cdf94a419ca62cfc4e9c5534dd | 22ff0921aee459abd0a3c15281de80ba6b4035bf | /March/day 10 filefand/writebinaryfile.py | 79f9f619a263d7cf34ccffa69cb421d7ec836a30 | []
| no_license | BhushanTayade88/Core-Python | b0516f234b866682931af95b723adb1269fb946a | f687e4029e3a3aaf751538604dfd06386084252b | refs/heads/master | 2023-08-03T02:09:38.536580 | 2021-10-05T17:25:20 | 2021-10-05T17:25:20 | 413,910,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | f = open("emo.jpg","rb")
print("file opened")
copy=f.read()
f.close()
print("file is closedd")
print("file closed")
nf=open("emo2.jpg","wb")
print("new file open")
nf.write(copy)
nf.close()
print("new file is closed ")
| [
"[email protected]"
]
| |
f2f001784249ff00c32e2d4b2a861044e4a83a65 | 57bf4de402076b3d3bd13860d54bffbc1e042a62 | /class/lect/Lect-04/cars2.py | 0bf9ffa7146a79e1c72dc042209607b9b0a68dd0 | [
"MIT"
]
| permissive | Mathieu0321/F21-1010 | 25516db6936d251a771f6349256daf69d7f5b1c6 | 4ad6b67cc7a821acaf7c64e25408e74590a56bb3 | refs/heads/main | 2023-07-29T10:44:18.102154 | 2021-09-09T13:12:00 | 2021-09-09T13:12:00 | 404,817,216 | 1 | 0 | MIT | 2021-09-09T17:46:07 | 2021-09-09T17:46:06 | null | UTF-8 | Python | false | false | 89 | py |
cars = ['Tesla','bmw','Mercedes-Benz','Aion']
cars
print(cars)
cars.sort()
print(cars)
| [
"[email protected]"
]
| |
f91a4af0af8738551ddd58b1d20701c183c3fca4 | b57b0a14df5c6841f04cccb7b02ad04afbca18f8 | /tokumx/tests/conftest.py | 07db4ebe9b173d851efc0f9d166f5c6401272b64 | [
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | zeroc0d3/integrations-core | d9c99803c049668b7f9f9c796d338e343d3d46ee | 634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb | refs/heads/master | 2021-09-28T18:37:00.650406 | 2021-09-13T11:59:45 | 2021-09-13T11:59:45 | 199,758,958 | 0 | 0 | BSD-3-Clause | 2019-07-31T02:01:25 | 2019-07-31T02:01:24 | null | UTF-8 | Python | false | false | 1,526 | py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from copy import deepcopy
import pytest
from datadog_checks.dev import docker_run
from datadog_checks.tokumx import TokuMX
from datadog_checks.tokumx.vendor import pymongo
from . import common
@pytest.fixture(scope="session")
def dd_environment():
"""
Start a cluster with one master, one replica and one unhealthy replica and
stop it after the tests are done.
If there's any problem executing docker-compose, let the exception bubble
up.
"""
compose_dir = os.path.join(common.HERE, 'compose')
with docker_run(
compose_file=os.path.join(compose_dir, 'docker-compose.yaml'),
log_patterns='admin web console waiting for connections',
env_vars={'COMPOSE_DIR': compose_dir},
):
set_up_tokumx()
yield common.INSTANCE
@pytest.fixture
def check():
return TokuMX('tokumx', {}, {})
@pytest.fixture
def instance():
return deepcopy(common.INSTANCE)
def set_up_tokumx():
cli = pymongo.MongoClient(
common.TOKUMX_SERVER, socketTimeoutMS=30000, read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED
)
foos = []
for _ in range(70):
foos.append({'1': []})
foos.append({'1': []})
foos.append({})
bars = []
for _ in range(50):
bars.append({'1': []})
bars.append({})
db = cli['test']
db.foo.insert_many(foos)
db.bar.insert_many(bars)
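
# Sketch of how these fixtures are typically consumed by a test module; the
# test body and the asserted metric name are illustrative only, not taken
# from this repository:
#
#   def test_check(aggregator, check, instance):
#       check.check(instance)
#       aggregator.assert_metric('tokumx.connections.current')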
| [
"[email protected]"
]
| |
402ae794cbed008bcc92a443ec9d21ff20da1fcc | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnbinomi.py | f14515d56f96f0df0c789cc1ad1feec6f9801b6e | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 43 | py | ii = [('BachARE.py', 8), ('SomeMMH.py', 2)] | [
"[email protected]"
]
|